| repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
sysadmin75/ansible | test/units/module_utils/common/test_dict_transformations.py | 78 | 4363 | # -*- coding: utf-8 -*-
# (c) 2017, Will Thames <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from units.compat import unittest
from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel, camel_dict_to_snake_dict, dict_merge
EXPECTED_SNAKIFICATION = {
'alllower': 'alllower',
'TwoWords': 'two_words',
'AllUpperAtEND': 'all_upper_at_end',
'AllUpperButPLURALs': 'all_upper_but_plurals',
'TargetGroupARNs': 'target_group_arns',
'HTTPEndpoints': 'http_endpoints',
'PLURALs': 'plurals'
}
EXPECTED_REVERSIBLE = {
'TwoWords': 'two_words',
'AllUpperAtEND': 'all_upper_at_e_n_d',
'AllUpperButPLURALs': 'all_upper_but_p_l_u_r_a_ls',
'TargetGroupARNs': 'target_group_a_r_ns',
'HTTPEndpoints': 'h_t_t_p_endpoints',
'PLURALs': 'p_l_u_r_a_ls'
}
class CamelToSnakeTestCase(unittest.TestCase):
def test_camel_to_snake(self):
for (k, v) in EXPECTED_SNAKIFICATION.items():
self.assertEqual(_camel_to_snake(k), v)
def test_reversible_camel_to_snake(self):
for (k, v) in EXPECTED_REVERSIBLE.items():
self.assertEqual(_camel_to_snake(k, reversible=True), v)
class SnakeToCamelTestCase(unittest.TestCase):
def test_snake_to_camel_reversed(self):
for (k, v) in EXPECTED_REVERSIBLE.items():
self.assertEqual(_snake_to_camel(v, capitalize_first=True), k)
class CamelToSnakeAndBackTestCase(unittest.TestCase):
def test_camel_to_snake_and_back(self):
for (k, v) in EXPECTED_REVERSIBLE.items():
self.assertEqual(_snake_to_camel(_camel_to_snake(k, reversible=True), capitalize_first=True), k)
class CamelDictToSnakeDictTestCase(unittest.TestCase):
def test_ignore_list(self):
camel_dict = dict(Hello=dict(One='one', Two='two'), World=dict(Three='three', Four='four'))
snake_dict = camel_dict_to_snake_dict(camel_dict, ignore_list='World')
self.assertEqual(snake_dict['hello'], dict(one='one', two='two'))
self.assertEqual(snake_dict['world'], dict(Three='three', Four='four'))
class DictMergeTestCase(unittest.TestCase):
def test_dict_merge(self):
base = dict(obj2=dict(), b1=True, b2=False, b3=False,
one=1, two=2, three=3, obj1=dict(key1=1, key2=2),
l1=[1, 3], l2=[1, 2, 3], l4=[4],
nested=dict(n1=dict(n2=2)))
other = dict(b1=True, b2=False, b3=True, b4=True,
one=1, three=4, four=4, obj1=dict(key1=2),
l1=[2, 1], l2=[3, 2, 1], l3=[1],
nested=dict(n1=dict(n2=2, n3=3)))
result = dict_merge(base, other)
# string assertions
self.assertTrue('one' in result)
self.assertTrue('two' in result)
self.assertEqual(result['three'], 4)
self.assertEqual(result['four'], 4)
# dict assertions
self.assertTrue('obj1' in result)
self.assertTrue('key1' in result['obj1'])
self.assertTrue('key2' in result['obj1'])
# list assertions
# the following assertions differ from the network_utils/common test of the
# function of the same name, because this dict_merge does not merge lists
self.assertEqual(result['l1'], [2, 1])
self.assertTrue('l2' in result)
self.assertEqual(result['l3'], [1])
self.assertTrue('l4' in result)
# nested assertions
self.assertTrue('obj1' in result)
self.assertEqual(result['obj1']['key1'], 2)
self.assertTrue('key2' in result['obj1'])
# bool assertions
self.assertTrue('b1' in result)
self.assertTrue('b2' in result)
self.assertTrue(result['b3'])
self.assertTrue(result['b4'])
| gpl-3.0 |
xanv/painindex | painindex_app/views.py | 1 | 1646 | import random
from django.shortcuts import get_object_or_404, render
from django.views.generic.edit import CreateView
from django.core.urlresolvers import reverse_lazy
from painindex_app.models import PainSource
from painindex_app.forms import PainReportForm
from django.http import HttpResponse
def homepage(request):
find_bugs = [PainSource.objects.select_random_in_range(i - 0.5, i + 0.5)
for i in range(10,0,-1)]
bugs = filter(None, find_bugs)
try:
highlighted_bug = random.choice(bugs)
# If bugs is empty, just pass an empty bug to the template, where it
# will be handled gracefully, rather than refusing to load the page.
except IndexError:
highlighted_bug = {}
content = {"find_bugs": find_bugs, "highlighted_bug": highlighted_bug}
return render(request, 'painindex_app/homepage.html', content)
def painsource_detail(request, painsource_id):
painsource = get_object_or_404(PainSource, pk=painsource_id)
return render(request, 'painindex_app/painsource_detail.html', {'painsource': painsource})
# def painreport_form(request):
# return render(request, 'painindex_app/painreport.html')
class PainReportView(CreateView):
form_class = PainReportForm
template_name = 'painindex_app/painreport.html'
# We probably want to change this:
success_url = reverse_lazy('painindex_app:painreport')
# This runs after form is found valid
def form_valid(self, form):
# Add any processing; for example, perhaps we want
# to run calc_rating on the PainSource that's just been updated.
return super(CreateView, self).form_valid(form) | mit |
grbd/GBD.Embedded.CMake4Mbed | bin/tools/LPCLink1/upload.py | 1 | 1174 | #!python3
"""
Script Wrapper for uploading firmware to LPC Expresso boards via LPCLink-1
"""
import sys, logging, argparse
from lpc_settings import LPCSettings
from pylib.logwrapper import LogWrapper
from pylib.process import Process
from os.path import abspath, dirname, join
try:
# Setup logging
LogWrapper.LogLevel = logging.DEBUG
LogWrapper.setup()
log = LogWrapper.getlogger()
# Function to Boot the LPC Board
def bootlpc():
log.info("Booting the LPC Board")
proc = Process()
proc.ExePath = Setts.bootbin
proc.Start()
ROOT = abspath(dirname(__file__))
# Load in the Settings
Setts = LPCSettings()
parser = argparse.ArgumentParser()
parser.add_argument("--bootdevice", action='store_true', default=False, help="Boot the LPCLink1 Device before upload")
args = parser.parse_args()
# Boot the LPC Link1 device if needed
if args.bootdevice == True:
bootlpc()
# Output any errors
except Exception as e:
log.critical (e)
if LogWrapper.LogLevel == logging.DEBUG:
import traceback
traceback.print_exc(file=sys.stdout)
sys.exit(1)
| apache-2.0 |
hbhzwj/imalse | tools/ns-allinone-3.14.1/ns-3.14.1/.waf-1.6.11-30618c54883417962c38f5d395f83584/waflib/Scripting.py | 11 | 10238 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shutil,traceback,errno,sys,stat
from waflib import Utils,Configure,Logs,Options,ConfigSet,Context,Errors,Build,Node
build_dir_override=None
no_climb_commands=['configure']
default_cmd="build"
def waf_entry_point(current_directory,version,wafdir):
Logs.init_log()
if Context.WAFVERSION!=version:
Logs.error('Waf script %r and library %r do not match (directory %r)'%(version,Context.WAFVERSION,wafdir))
sys.exit(1)
if'--version'in sys.argv:
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Context.waf_dir=wafdir
Context.launch_dir=current_directory
no_climb=os.environ.get('NOCLIMB',None)
if not no_climb:
for k in no_climb_commands:
if k in sys.argv:
no_climb=True
break
cur=current_directory
while cur:
lst=os.listdir(cur)
if Options.lockfile in lst:
env=ConfigSet.ConfigSet()
try:
env.load(os.path.join(cur,Options.lockfile))
ino=os.stat(cur)[stat.ST_INO]
except Exception:
pass
else:
for x in[env.run_dir,env.top_dir,env.out_dir]:
if Utils.is_win32:
if cur==x:
load=True
break
else:
try:
ino2=os.stat(x)[stat.ST_INO]
except:
pass
else:
if ino==ino2:
load=True
break
else:
Logs.warn('invalid lock file in %s'%cur)
load=False
if load:
Context.run_dir=env.run_dir
Context.top_dir=env.top_dir
Context.out_dir=env.out_dir
break
if not Context.run_dir:
if Context.WSCRIPT_FILE in lst:
Context.run_dir=cur
next=os.path.dirname(cur)
if next==cur:
break
cur=next
if no_climb:
break
if not Context.run_dir:
if'-h'in sys.argv or'--help'in sys.argv:
Logs.warn('No wscript file found: the help message may be incomplete')
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Logs.error('Waf: Run from a directory containing a file named %r'%Context.WSCRIPT_FILE)
sys.exit(1)
try:
os.chdir(Context.run_dir)
except OSError:
Logs.error('Waf: The folder %r is unreadable'%Context.run_dir)
sys.exit(1)
try:
set_main_module(Context.run_dir+os.sep+Context.WSCRIPT_FILE)
except Errors.WafError ,e:
Logs.pprint('RED',e.verbose_msg)
Logs.error(str(e))
sys.exit(1)
except Exception ,e:
Logs.error('Waf: The wscript in %r is unreadable'%Context.run_dir,e)
traceback.print_exc(file=sys.stdout)
sys.exit(2)
try:
run_commands()
except Errors.WafError ,e:
if Logs.verbose>1:
Logs.pprint('RED',e.verbose_msg)
Logs.error(e.msg)
sys.exit(1)
except Exception ,e:
traceback.print_exc(file=sys.stdout)
sys.exit(2)
except KeyboardInterrupt:
Logs.pprint('RED','Interrupted')
sys.exit(68)
def set_main_module(file_path):
Context.g_module=Context.load_module(file_path)
Context.g_module.root_path=file_path
def set_def(obj):
name=obj.__name__
if not name in Context.g_module.__dict__:
setattr(Context.g_module,name,obj)
for k in[update,dist,distclean,distcheck,update]:
set_def(k)
if not'init'in Context.g_module.__dict__:
Context.g_module.init=Utils.nada
if not'shutdown'in Context.g_module.__dict__:
Context.g_module.shutdown=Utils.nada
if not'options'in Context.g_module.__dict__:
Context.g_module.options=Utils.nada
def parse_options():
Context.create_context('options').execute()
if not Options.commands:
Options.commands=[default_cmd]
Options.commands=[x for x in Options.commands if x!='options']
Logs.verbose=Options.options.verbose
Logs.init_log()
if Options.options.zones:
Logs.zones=Options.options.zones.split(',')
if not Logs.verbose:
Logs.verbose=1
elif Logs.verbose>0:
Logs.zones=['runner']
if Logs.verbose>2:
Logs.zones=['*']
def run_command(cmd_name):
ctx=Context.create_context(cmd_name)
ctx.options=Options.options
ctx.cmd=cmd_name
ctx.execute()
return ctx
def run_commands():
parse_options()
run_command('init')
while Options.commands:
cmd_name=Options.commands.pop(0)
timer=Utils.Timer()
run_command(cmd_name)
if not Options.options.progress_bar:
elapsed=' (%s)'%str(timer)
Logs.info('%r finished successfully%s'%(cmd_name,elapsed))
run_command('shutdown')
def _can_distclean(name):
for k in'.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
for(root,dirs,files)in os.walk(dirname):
for f in files:
if _can_distclean(f):
fname=root+os.sep+f
try:
os.unlink(fname)
except:
Logs.warn('could not remove %r'%fname)
for x in[Context.DBFILE,'config.log']:
try:
os.unlink(x)
except:
pass
try:
shutil.rmtree('c4che')
except:
pass
def distclean(ctx):
'''removes the build directory'''
lst=os.listdir('.')
for f in lst:
if f==Options.lockfile:
try:
proj=ConfigSet.ConfigSet(f)
except:
Logs.warn('could not read %r'%f)
continue
if proj['out_dir']!=proj['top_dir']:
try:
shutil.rmtree(proj['out_dir'])
except IOError:
pass
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('project %r cannot be removed'%proj[Context.OUT])
else:
distclean_dir(proj['out_dir'])
for k in(proj['out_dir'],proj['top_dir'],proj['run_dir']):
try:
os.remove(os.path.join(k,Options.lockfile))
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('file %r cannot be removed'%f)
if f.startswith('.waf')and not Options.commands:
shutil.rmtree(f,ignore_errors=True)
class Dist(Context.Context):
cmd='dist'
fun='dist'
algo='tar.bz2'
ext_algo={}
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
def archive(self):
import tarfile
arch_name=self.get_arch_name()
try:
self.base_path
except:
self.base_path=self.path
node=self.base_path.make_node(arch_name)
try:
node.delete()
except:
pass
files=self.get_files()
if self.algo.startswith('tar.'):
tar=tarfile.open(arch_name,'w:'+self.algo.replace('tar.',''))
for x in files:
self.add_tar_file(x,tar)
tar.close()
elif self.algo=='zip':
import zipfile
zip=zipfile.ZipFile(arch_name,'w',compression=zipfile.ZIP_DEFLATED)
for x in files:
archive_name=self.get_base_name()+'/'+x.path_from(self.base_path)
zip.write(x.abspath(),archive_name,zipfile.ZIP_DEFLATED)
zip.close()
else:
self.fatal('Valid algo types are tar.bz2, tar.gz or zip')
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
try:
digest=" (sha=%r)"%sha(node.read()).hexdigest()
except:
digest=''
Logs.info('New archive created: %s%s'%(self.arch_name,digest))
def get_tar_path(self,node):
return node.abspath()
def add_tar_file(self,x,tar):
p=self.get_tar_path(x)
tinfo=tar.gettarinfo(name=p,arcname=self.get_tar_prefix()+'/'+x.path_from(self.base_path))
tinfo.uid=0
tinfo.gid=0
tinfo.uname='root'
tinfo.gname='root'
fu=None
try:
fu=open(p,'rb')
tar.addfile(tinfo,fileobj=fu)
finally:
if fu:
fu.close()
def get_tar_prefix(self):
try:
return self.tar_prefix
except:
return self.get_base_name()
def get_arch_name(self):
try:
self.arch_name
except:
self.arch_name=self.get_base_name()+'.'+self.ext_algo.get(self.algo,self.algo)
return self.arch_name
def get_base_name(self):
try:
self.base_name
except:
appname=getattr(Context.g_module,Context.APPNAME,'noname')
version=getattr(Context.g_module,Context.VERSION,'1.0')
self.base_name=appname+'-'+version
return self.base_name
def get_excl(self):
try:
return self.excl
except:
self.excl=Node.exclude_regs+' **/waf-1.6.* **/.waf-1.6* **/waf3-1.6.* **/.waf3-1.6* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
nd=self.root.find_node(Context.out_dir)
if nd:
self.excl+=' '+nd.path_from(self.base_path)
return self.excl
def get_files(self):
try:
files=self.files
except:
files=self.base_path.ant_glob('**/*',excl=self.get_excl())
return files
def dist(ctx):
'''makes a tarball for redistributing the sources'''
pass
class DistCheck(Dist):
fun='distcheck'
cmd='distcheck'
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
self.check()
def check(self):
import tempfile,tarfile
t=None
try:
t=tarfile.open(self.get_arch_name())
for x in t:
t.extract(x)
finally:
if t:
t.close()
instdir=tempfile.mkdtemp('.inst',self.get_base_name())
ret=Utils.subprocess.Popen([sys.argv[0],'configure','install','uninstall','--destdir='+instdir],cwd=self.get_base_name()).wait()
if ret:
raise Errors.WafError('distcheck failed with code %i'%ret)
if os.path.exists(instdir):
raise Errors.WafError('distcheck succeeded, but files were left in %s'%instdir)
shutil.rmtree(self.get_base_name())
def distcheck(ctx):
'''checks if the project compiles (tarball from 'dist')'''
pass
def update(ctx):
'''updates the plugins from the *waflib/extras* directory'''
lst=Options.options.files.split(',')
if not lst:
lst=[x for x in Utils.listdir(Context.waf_dir+'/waflib/extras')if x.endswith('.py')]
for x in lst:
tool=x.replace('.py','')
try:
Configure.download_tool(tool,force=True,ctx=ctx)
except Errors.WafError:
Logs.error('Could not find the tool %s in the remote repository'%x)
def autoconfigure(execute_method):
def execute(self):
if not Configure.autoconfig:
return execute_method(self)
env=ConfigSet.ConfigSet()
do_config=False
try:
env.load(os.path.join(Context.top_dir,Options.lockfile))
except Exception:
Logs.warn('Configuring the project')
do_config=True
else:
if env.run_dir!=Context.run_dir:
do_config=True
else:
h=0
for f in env['files']:
h=hash((h,Utils.readf(f,'rb')))
do_config=h!=env.hash
if do_config:
Options.commands.insert(0,self.cmd)
Options.commands.insert(0,'configure')
return
return execute_method(self)
return execute
Build.BuildContext.execute=autoconfigure(Build.BuildContext.execute)
| gpl-3.0 |
ganeshnalawade/ansible-modules-core | network/nxos/nxos_bgp.py | 8 | 37282 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_bgp
version_added: "2.2"
short_description: Manages BGP configuration.
description:
- Manages BGP configurations on NX-OS switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- C(state=absent) removes the whole BGP ASN configuration when
C(vrf=default) or the whole VRF instance within the BGP process when
using a different VRF.
- Default, when supported, restores the parameter's default value.
- Configuring global params is only permitted if C(vrf=default).
options:
asn:
description:
- BGP autonomous system number. Valid values are String,
Integer in ASPLAIN or ASDOT notation.
required: true
vrf:
description:
- Name of the VRF. The name 'default' is a valid VRF representing
the global BGP.
required: false
default: null
bestpath_always_compare_med:
description:
- Enable/Disable MED comparison on paths from different
autonomous systems.
required: false
choices: ['true','false']
default: null
bestpath_aspath_multipath_relax:
description:
- Enable/Disable load sharing across the providers with
different (but equal-length) AS paths.
required: false
choices: ['true','false']
default: null
bestpath_compare_routerid:
description:
- Enable/Disable comparison of router IDs for identical eBGP paths.
required: false
choices: ['true','false']
default: null
bestpath_cost_community_ignore:
description:
- Enable/Disable Ignores the cost community for BGP best-path
calculations.
required: false
choices: ['true','false']
default: null
bestpath_med_confed:
description:
- Enable/Disable enforcement of bestpath to do a MED comparison
only between paths originated within a confederation.
required: false
choices: ['true','false']
default: null
bestpath_med_missing_as_worst:
description:
- Enable/Disable assigns the value of infinity to received
routes that do not carry the MED attribute, making these routes
the least desirable.
required: false
choices: ['true','false']
default: null
bestpath_med_non_deterministic:
description:
- Enable/Disable deterministic selection of the best MED pat
from among the paths from the same autonomous system.
required: false
choices: ['true','false']
default: null
cluster_id:
description:
- Route Reflector Cluster-ID.
required: false
default: null
confederation_id:
description:
- Routing domain confederation AS.
required: false
default: null
confederation_peers:
description:
- AS confederation parameters.
required: false
default: null
disable_policy_batching:
description:
- Enable/Disable the batching evaluation of prefix advertisement
to all peers.
required: false
choices: ['true','false']
default: null
disable_policy_batching_ipv4_prefix_list:
description:
- Enable/Disable the batching evaluation of prefix advertisements
to all peers with prefix list.
required: false
default: null
disable_policy_batching_ipv6_prefix_list:
description:
- Enable/Disable the batching evaluation of prefix advertisements
to all peers with prefix list.
required: false
enforce_first_as:
description:
- Enable/Disable enforces the neighbor autonomous system to be
the first AS number listed in the AS path attribute for eBGP.
On NX-OS, this property is only supported in the
global BGP context.
required: false
choices: ['true','false']
default: null
event_history_cli:
description:
- Enable/Disable cli event history buffer.
required: false
choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default']
default: null
event_history_detail:
description:
- Enable/Disable detail event history buffer.
required: false
choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default']
default: null
event_history_events:
description:
- Enable/Disable event history buffer.
required: false
choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default']
default: null
event_history_periodic:
description:
- Enable/Disable periodic event history buffer.
required: false
choices: ['size_small', 'size_medium', 'size_large', 'size_disable', 'default']
fast_external_fallover:
description:
- Enable/Disable immediately reset the session if the link to a
directly connected BGP peer goes down. Only supported in the
global BGP context.
required: false
choices: ['true','false']
default: null
flush_routes:
description:
- Enable/Disable flush routes in RIB upon controlled restart.
On NX-OS, this property is only supported in the global
BGP context.
required: false
choices: ['true','false']
default: null
graceful_restart:
description:
- Enable/Disable graceful restart.
required: false
choices: ['true','false']
default: null
graceful_restart_helper:
description:
- Enable/Disable graceful restart helper mode.
required: false
choices: ['true','false']
default: null
graceful_restart_timers_restart:
description:
- Set maximum time for a restart sent to the BGP peer.
required: false
choices: ['true','false']
default: null
graceful_restart_timers_stalepath_time:
description:
- Set maximum time that BGP keeps the stale routes from the
restarting BGP peer.
choices: ['true','false']
default: null
isolate:
description:
- Enable/Disable isolate this router from BGP perspective.
required: false
choices: ['true','false']
default: null
local_as:
description:
- Local AS number to be used within a VRF instance.
required: false
default: null
log_neighbor_changes:
description:
- Enable/Disable message logging for neighbor up/down event.
required: false
choices: ['true','false']
default: null
maxas_limit:
description:
- Specify Maximum number of AS numbers allowed in the AS-path
attribute. Valid values are between 1 and 512.
required: false
default: null
neighbor_down_fib_accelerate:
description:
- Enable/Disable handle BGP neighbor down event, due to
various reasons.
required: false
choices: ['true','false']
default: null
reconnect_interval:
description:
- The BGP reconnection interval for dropped sessions.
Valid values are between 1 and 60.
required: false
default: null
router_id:
description:
- Router Identifier (ID) of the BGP router VRF instance.
required: false
default: null
shutdown:
description:
- Administratively shutdown the BGP protocol.
required: false
choices: ['true','false']
default: null
suppress_fib_pending:
description:
- Enable/Disable advertise only routes programmed in hardware
to peers.
required: false
choices: ['true','false']
default: null
timer_bestpath_limit:
description:
- Specify timeout for the first best path after a restart,
in seconds.
required: false
default: null
timer_bestpath_limit_always:
description:
- Enable/Disable update-delay-always option.
required: false
choices: ['true','false']
default: null
timer_bgp_hold:
description:
- Set BGP hold timer.
required: false
default: null
timer_bgp_keepalive:
description:
- Set BGP keepalive timer.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Configure a simple ASN
nxos_bgp:
asn: 65535
vrf: test
router_id: 1.1.1.1
state: present
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"}
existing:
description: k/v pairs of existing BGP configuration
returned: verbose mode
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "11.11.11.11", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
end_state:
description: k/v pairs of BGP configuration after module execution
returned: verbose mode
type: dict
sample: {"asn": "65535", "bestpath_always_compare_med": false,
"bestpath_aspath_multipath_relax": false,
"bestpath_compare_neighborid": false,
"bestpath_compare_routerid": false,
"bestpath_cost_community_ignore": false,
"bestpath_med_confed": false,
"bestpath_med_missing_as_worst": false,
"bestpath_med_non_deterministic": false, "cluster_id": "",
"confederation_id": "", "confederation_peers": "",
"graceful_restart": true, "graceful_restart_helper": false,
"graceful_restart_timers_restart": "120",
"graceful_restart_timers_stalepath_time": "300", "local_as": "",
"log_neighbor_changes": false, "maxas_limit": "",
"neighbor_down_fib_accelerate": false, "reconnect_interval": "60",
"router_id": "1.1.1.1", "suppress_fib_pending": false,
"timer_bestpath_limit": "", "timer_bgp_hold": "180",
"timer_bgp_keepalive": "60", "vrf": "test"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
import ansible.module_utils.nxos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.network import NetworkModule
from ansible.module_utils.shell import ShellError
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
WARNINGS = []
BOOL_PARAMS = [
'bestpath_always_compare_med',
'bestpath_aspath_multipath_relax',
'bestpath_compare_neighborid',
'bestpath_compare_routerid',
'bestpath_cost_community_ignore',
'bestpath_med_confed',
'bestpath_med_missing_as_worst',
'bestpath_med_non_deterministic',
'disable_policy_batching',
'enforce_first_as',
'fast_external_fallover',
'flush_routes',
'graceful_restart',
'graceful_restart_helper',
'isolate',
'log_neighbor_changes',
'neighbor_down_fib_accelerate',
'shutdown',
'suppress_fib_pending'
]
GLOBAL_PARAMS = [
'disable_policy_batching',
'disable_policy_batching_ipv4_prefix_list',
'disable_policy_batching_ipv6_prefix_list',
'enforce_first_as',
'event_history_cli',
'event_history_detail',
'event_history_events',
'event_history_periodic',
'fast_external_fallover',
'flush_routes',
'isolate',
'shutdown'
]
PARAM_TO_DEFAULT_KEYMAP = {
'timer_bgp_keepalive': '60',
'timer_bgp_hold': '180',
'graceful_restart': True,
'graceful_restart_timers_restart': '120',
'graceful_restart_timers_stalepath_time': '300',
'reconnect_interval': '60',
'suppress_fib_pending': True,
'fast_external_fallover': True,
'enforce_first_as': True,
'event_history_periodic': True,
'event_history_cli': True,
'event_history_events': True
}
PARAM_TO_COMMAND_KEYMAP = {
'asn': 'router bgp',
'bestpath_always_compare_med': 'bestpath always-compare-med',
'bestpath_aspath_multipath_relax': 'bestpath as-path multipath-relax',
'bestpath_compare_neighborid': 'bestpath compare-neighborid',
'bestpath_compare_routerid': 'bestpath compare-routerid',
'bestpath_cost_community_ignore': 'bestpath cost-community ignore',
'bestpath_med_confed': 'bestpath med confed',
'bestpath_med_missing_as_worst': 'bestpath med missing-as-worst',
'bestpath_med_non_deterministic': 'bestpath med non-deterministic',
'cluster_id': 'cluster-id',
'confederation_id': 'confederation identifier',
'confederation_peers': 'confederation peers',
'disable_policy_batching': 'disable-policy-batching',
'disable_policy_batching_ipv4_prefix_list': 'disable-policy-batching ipv4 prefix-list',
'disable_policy_batching_ipv6_prefix_list': 'disable-policy-batching ipv6 prefix-list',
'enforce_first_as': 'enforce-first-as',
'event_history_cli': 'event-history cli',
'event_history_detail': 'event-history detail',
'event_history_events': 'event-history events',
'event_history_periodic': 'event-history periodic',
'fast_external_fallover': 'fast-external-fallover',
'flush_routes': 'flush-routes',
'graceful_restart': 'graceful-restart',
'graceful_restart_helper': 'graceful-restart-helper',
'graceful_restart_timers_restart': 'graceful-restart restart-time',
'graceful_restart_timers_stalepath_time': 'graceful-restart stalepath-time',
'isolate': 'isolate',
'local_as': 'local-as',
'log_neighbor_changes': 'log-neighbor-changes',
'maxas_limit': 'maxas-limit',
'neighbor_down_fib_accelerate': 'neighbor-down fib-accelerate',
'reconnect_interval': 'reconnect-interval',
'router_id': 'router-id',
'shutdown': 'shutdown',
'suppress_fib_pending': 'suppress-fib-pending',
'timer_bestpath_limit': 'timers bestpath-limit',
'timer_bgp_hold': 'timers bgp',
'timer_bgp_keepalive': 'timers bgp',
'vrf': 'vrf'
}
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_custom_value(config, arg):
if arg.startswith('event_history'):
REGEX_SIZE = re.compile(r'(?:{0} size\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
if 'no {0}'.format(PARAM_TO_COMMAND_KEYMAP[arg]) in config:
pass
elif PARAM_TO_COMMAND_KEYMAP[arg] in config:
try:
value = REGEX_SIZE.search(config).group('value')
except AttributeError:
if REGEX.search(config):
value = True
elif arg == 'enforce_first_as' or arg == 'fast_external_fallover':
REGEX = re.compile(r'no\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = True
try:
if REGEX.search(config):
value = False
except TypeError:
value = True
elif arg == 'confederation_peers':
REGEX = re.compile(r'(?:confederation peers\s)(?P<value>.*)$', re.M)
value = ''
if 'confederation peers' in config:
value = REGEX.search(config).group('value').split()
elif arg == 'timer_bgp_keepalive':
REGEX = re.compile(r'(?:timers bgp\s)(?P<value>.*)$', re.M)
value = ''
if 'timers bgp' in config:
parsed = REGEX.search(config).group('value').split()
value = parsed[0]
elif arg == 'timer_bgp_hold':
REGEX = re.compile(r'(?:timers bgp\s)(?P<value>.*)$', re.M)
value = ''
if 'timers bgp' in config:
parsed = REGEX.search(config).group('value').split()
if len(parsed) == 2:
value = parsed[1]
return value
def get_value(arg, config):
custom = [
'event_history_cli',
'event_history_events',
'event_history_periodic',
'event_history_detail',
'confederation_peers',
'timer_bgp_hold',
'timer_bgp_keepalive',
'enforce_first_as',
'fast_external_fallover'
]
if arg in custom:
value = get_custom_value(config, arg)
elif arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
try:
asn_regex = '.*router\sbgp\s(?P<existing_asn>\d+).*'
match_asn = re.match(asn_regex, str(netcfg), re.DOTALL)
existing_asn_group = match_asn.groupdict()
existing_asn = existing_asn_group['existing_asn']
except AttributeError:
existing_asn = ''
if existing_asn:
bgp_parent = 'router bgp {0}'.format(existing_asn)
if module.params['vrf'] != 'default':
parents = [bgp_parent, 'vrf {0}'.format(module.params['vrf'])]
else:
parents = [bgp_parent]
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg != 'asn':
if module.params['vrf'] != 'default':
if arg not in GLOBAL_PARAMS:
existing[arg] = get_value(arg, config)
else:
existing[arg] = get_value(arg, config)
existing['asn'] = existing_asn
if module.params['vrf'] == 'default':
existing['vrf'] = 'default'
else:
if (module.params['state'] == 'present' and
module.params['vrf'] != 'default'):
msg = ("VRF {0} doesn't exist. ".format(module.params['vrf']))
WARNINGS.append(msg)
else:
if (module.params['state'] == 'present' and
module.params['vrf'] != 'default'):
msg = ("VRF {0} doesn't exist. ".format(module.params['vrf']))
WARNINGS.append(msg)
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
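# Illustrative behaviour of apply_key_map (example values are assumptions):
#   apply_key_map({'router_id': 'router-id'}, {'router_id': '1.1.1.1'})
#   returns {'router-id': '1.1.1.1'}; keys absent from key_map are dropped.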
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.iteritems():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if key in PARAM_TO_DEFAULT_KEYMAP:
commands.append('{0} {1}'.format(key, PARAM_TO_DEFAULT_KEYMAP[key]))
elif existing_commands.get(key):
existing_value = existing_commands.get(key)
if key == 'confederation peers':
commands.append('no {0} {1}'.format(key, ' '.join(existing_value)))
else:
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key == 'confederation peers':
existing_confederation_peers = existing.get('confederation_peers')
if existing_confederation_peers:
if not isinstance(existing_confederation_peers, list):
existing_confederation_peers = [existing_confederation_peers]
else:
existing_confederation_peers = []
values = value.split()
for each_value in values:
if each_value not in existing_confederation_peers:
existing_confederation_peers.append(each_value)
peer_string = ' '.join(existing_confederation_peers)
commands.append('{0} {1}'.format(key, peer_string))
elif key.startswith('timers bgp'):
command = 'timers bgp {0} {1}'.format(
proposed['timer_bgp_keepalive'],
proposed['timer_bgp_hold'])
if command not in commands:
commands.append(command)
else:
if value.startswith('size'):
value = value.replace('_', ' ')
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
commands = fix_commands(commands)
parents = ['router bgp {0}'.format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
candidate.add(commands, parents=parents)
else:
if len(proposed.keys()) == 0:
if module.params['vrf'] != 'default':
commands.append('vrf {0}'.format(module.params['vrf']))
parents = ['router bgp {0}'.format(module.params['asn'])]
else:
commands.append('router bgp {0}'.format(module.params['asn']))
parents = []
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = []
if module.params['vrf'] == 'default':
commands.append('no router bgp {0}'.format(module.params['asn']))
else:
if existing.get('vrf') == module.params['vrf']:
commands.append('no vrf {0}'.format(module.params['vrf']))
parents = ['router bgp {0}'.format(module.params['asn'])]
candidate.add(commands, parents=parents)
def fix_commands(commands):
local_as_command = ''
confederation_id_command = ''
confederation_peers_command = ''
for command in commands:
if 'local-as' in command:
local_as_command = command
elif 'confederation identifier' in command:
confederation_id_command = command
elif 'confederation peers' in command:
confederation_peers_command = command
if local_as_command and confederation_id_command:
commands.pop(commands.index(local_as_command))
commands.pop(commands.index(confederation_id_command))
commands.append(local_as_command)
commands.append(confederation_id_command)
elif confederation_peers_command and confederation_id_command:
commands.pop(commands.index(confederation_peers_command))
commands.pop(commands.index(confederation_id_command))
commands.append(confederation_id_command)
commands.append(confederation_peers_command)
return commands
def main():
argument_spec = dict(
asn=dict(required=True, type='str'),
vrf=dict(required=False, type='str', default='default'),
bestpath_always_compare_med=dict(required=False, type='bool'),
bestpath_aspath_multipath_relax=dict(required=False, type='bool'),
bestpath_compare_neighborid=dict(required=False, type='bool'),
bestpath_compare_routerid=dict(required=False, type='bool'),
bestpath_cost_community_ignore=dict(required=False, type='bool'),
bestpath_med_confed=dict(required=False, type='bool'),
bestpath_med_missing_as_worst=dict(required=False, type='bool'),
bestpath_med_non_deterministic=dict(required=False, type='bool'),
cluster_id=dict(required=False, type='str'),
confederation_id=dict(required=False, type='str'),
confederation_peers=dict(required=False, type='str'),
disable_policy_batching=dict(required=False, type='bool'),
disable_policy_batching_ipv4_prefix_list=dict(required=False, type='str'),
disable_policy_batching_ipv6_prefix_list=dict(required=False, type='str'),
enforce_first_as=dict(required=False, type='bool'),
event_history_cli=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']),
event_history_detail=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']),
event_history_events=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']),
event_history_periodic=dict(required=False, choices=['true', 'false', 'default', 'size_small', 'size_medium', 'size_large', 'size_disable']),
fast_external_fallover=dict(required=False, type='bool'),
flush_routes=dict(required=False, type='bool'),
graceful_restart=dict(required=False, type='bool'),
graceful_restart_helper=dict(required=False, type='bool'),
graceful_restart_timers_restart=dict(required=False, type='str'),
graceful_restart_timers_stalepath_time=dict(required=False, type='str'),
isolate=dict(required=False, type='bool'),
local_as=dict(required=False, type='str'),
log_neighbor_changes=dict(required=False, type='bool'),
maxas_limit=dict(required=False, type='str'),
neighbor_down_fib_accelerate=dict(required=False, type='bool'),
reconnect_interval=dict(required=False, type='str'),
router_id=dict(required=False, type='str'),
shutdown=dict(required=False, type='bool'),
suppress_fib_pending=dict(required=False, type='bool'),
timer_bestpath_limit=dict(required=False, type='str'),
timer_bgp_hold=dict(required=False, type='str'),
timer_bgp_keepalive=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
required_together=[['timer_bgp_hold',
'timer_bgp_keepalive']],
supports_check_mode=True)
state = module.params['state']
args = [
"asn",
"bestpath_always_compare_med",
"bestpath_aspath_multipath_relax",
"bestpath_compare_neighborid",
"bestpath_compare_routerid",
"bestpath_cost_community_ignore",
"bestpath_med_confed",
"bestpath_med_missing_as_worst",
"bestpath_med_non_deterministic",
"cluster_id",
"confederation_id",
"confederation_peers",
"disable_policy_batching",
"disable_policy_batching_ipv4_prefix_list",
"disable_policy_batching_ipv6_prefix_list",
"enforce_first_as",
"event_history_cli",
"event_history_detail",
"event_history_events",
"event_history_periodic",
"fast_external_fallover",
"flush_routes",
"graceful_restart",
"graceful_restart_helper",
"graceful_restart_timers_restart",
"graceful_restart_timers_stalepath_time",
"isolate",
"local_as",
"log_neighbor_changes",
"maxas_limit",
"neighbor_down_fib_accelerate",
"reconnect_interval",
"router_id",
"shutdown",
"suppress_fib_pending",
"timer_bestpath_limit",
"timer_bgp_hold",
"timer_bgp_keepalive",
"vrf"
]
if module.params['vrf'] != 'default':
for param, inserted_value in module.params.iteritems():
if param in GLOBAL_PARAMS and inserted_value:
module.fail_json(msg='Global params can be modified only'
' under "default" VRF.',
vrf=module.params['vrf'],
global_param=param)
existing = invoke('get_existing', module, args)
if existing.get('asn'):
if (existing.get('asn') != module.params['asn'] and
state == 'present'):
module.fail_json(msg='Another BGP ASN already exists.',
proposed_asn=module.params['asn'],
existing_asn=existing.get('asn'))
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.iteritems()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.iteritems():
if key != 'asn' and key != 'vrf':
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if (state == 'present' or (state == 'absent' and
existing.get('asn') == module.params['asn'])):
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
famulus/aubio | python.old/aubio/task/notes.py | 13 | 4192 |
from aubio.task import task
from aubio.aubioclass import *
class tasknotes(task):
def __init__(self,input,output=None,params=None):
task.__init__(self,input,params=params)
self.opick = onsetpick(self.params.bufsize,
self.params.hopsize,
self.channels,
self.myvec,
self.params.threshold,
mode=self.params.onsetmode,
dcthreshold=self.params.dcthreshold,
derivate=self.params.derivate)
self.pitchdet = pitch(mode=self.params.pitchmode,
bufsize=self.params.pbufsize,
hopsize=self.params.phopsize,
channels=self.channels,
samplerate=self.srate,
omode=self.params.omode)
self.olist = []
self.ofunc = []
self.maxofunc = 0
self.last = -1000
self.oldifreq = 0
if self.params.localmin:
self.ovalist = [0., 0., 0., 0., 0.]
def __call__(self):
from aubio.median import short_find
task.__call__(self)
isonset,val = self.opick.do(self.myvec)
if (aubio_silence_detection(self.myvec(),self.params.silence)):
isonset=0
freq = -1.
else:
freq = self.pitchdet(self.myvec)
minpitch = self.params.pitchmin
maxpitch = self.params.pitchmax
if maxpitch and freq > maxpitch :
freq = -1.
elif minpitch and freq < minpitch :
freq = -1.
freq = aubio_freqtomidi(freq)
if self.params.pitchsmooth:
self.shortlist.append(freq)
self.shortlist.pop(0)
smoothfreq = short_find(self.shortlist,
len(self.shortlist)/2)
freq = smoothfreq
now = self.frameread
ifreq = int(round(freq))
if self.oldifreq == ifreq:
self.oldifreq = ifreq
else:
self.oldifreq = ifreq
ifreq = 0
# take back delay
if self.params.delay != 0.: now -= self.params.delay
if now < 0 :
now = 0
if (isonset == 1):
if self.params.mintol:
# prune doubled
if (now - self.last) > self.params.mintol:
self.last = now
return now, 1, freq, ifreq
else:
return now, 0, freq, ifreq
else:
return now, 1, freq, ifreq
else:
return now, 0, freq, ifreq
def fprint(self,foo):
print self.params.step*foo[0], foo[1], foo[2], foo[3]
def compute_all(self):
""" Compute data """
now, onset, freq, ifreq = [], [], [], []
while(self.readsize==self.params.hopsize):
n, o, f, i = self()
now.append(n*self.params.step)
onset.append(o)
freq.append(f)
ifreq.append(i)
if self.params.verbose:
self.fprint((n,o,f,i))
return now, onset, freq, ifreq
def plot(self,now,onset,freq,ifreq,oplots):
import Gnuplot
oplots.append(Gnuplot.Data(now,freq,with_='lines',
title=self.params.pitchmode))
oplots.append(Gnuplot.Data(now,ifreq,with_='lines',
title=self.params.pitchmode))
temponsets = []
for i in onset:
temponsets.append(i*1000)
oplots.append(Gnuplot.Data(now,temponsets,with_='impulses',
title=self.params.pitchmode))
def plotplot(self,wplot,oplots,outplot=None,multiplot = 0):
from aubio.gnuplot import gnuplot_init, audio_to_array, make_audio_plot
import re
import Gnuplot
# audio data
time,data = audio_to_array(self.input)
f = make_audio_plot(time,data)
# check if ground truth exists
#timet,pitcht = self.gettruth()
#if timet and pitcht:
# oplots = [Gnuplot.Data(timet,pitcht,with_='lines',
# title='ground truth')] + oplots
t = Gnuplot.Data(0,0,with_='impulses')
g = gnuplot_init(outplot)
g('set title \'%s\'' % (re.sub('.*/','',self.input)))
g('set multiplot')
# hack to align left axis
g('set lmargin 15')
# plot waveform and onsets
g('set size 1,0.3')
g('set origin 0,0.7')
g('set xrange [0:%f]' % max(time))
g('set yrange [-1:1]')
g.ylabel('amplitude')
g.plot(f)
g('unset title')
# plot onset detection function
g('set size 1,0.7')
g('set origin 0,0')
g('set xrange [0:%f]' % max(time))
g('set yrange [20:100]')
g('set key right top')
g('set noclip one')
#g('set format x ""')
#g('set log y')
#g.xlabel('time (s)')
g.ylabel('f0 (Hz)')
if multiplot:
for i in range(len(oplots)):
# plot onset detection functions
g('set size 1,%f' % (0.7/(len(oplots))))
g('set origin 0,%f' % (float(i)*0.7/(len(oplots))))
g('set xrange [0:%f]' % max(time))
g.plot(oplots[i])
else:
g.plot(*oplots)
#g('unset multiplot')
| gpl-3.0 |
theheros/kbengine | kbe/res/scripts/common/Lib/importlib/test/builtin/test_loader.py | 3 | 3295 | import importlib
from importlib import machinery
from .. import abc
from .. import util
from . import util as builtin_util
import sys
import types
import unittest
class LoaderTests(abc.LoaderTests):
"""Test load_module() for built-in modules."""
verification = {'__name__': 'errno', '__package__': '',
'__loader__': machinery.BuiltinImporter}
def verify(self, module):
"""Verify that the module matches against what it should have."""
self.assertTrue(isinstance(module, types.ModuleType))
for attr, value in self.verification.items():
self.assertEqual(getattr(module, attr), value)
self.assertTrue(module.__name__ in sys.modules)
load_module = staticmethod(lambda name:
machinery.BuiltinImporter.load_module(name))
def test_module(self):
# Common case.
with util.uncache(builtin_util.NAME):
module = self.load_module(builtin_util.NAME)
self.verify(module)
def test_package(self):
# Built-in modules cannot be a package.
pass
def test_lacking_parent(self):
# Built-in modules cannot be a package.
pass
def test_state_after_failure(self):
# No way to force an import failure.
pass
def test_module_reuse(self):
# Test that the same module is used in a reload.
with util.uncache(builtin_util.NAME):
module1 = self.load_module(builtin_util.NAME)
module2 = self.load_module(builtin_util.NAME)
self.assertTrue(module1 is module2)
def test_unloadable(self):
name = 'dssdsdfff'
assert name not in sys.builtin_module_names
with self.assertRaises(ImportError):
self.load_module(name)
def test_already_imported(self):
# Using the name of a module already imported but not a built-in should
# still fail.
assert hasattr(importlib, '__file__') # Not a built-in.
with self.assertRaises(ImportError):
self.load_module('importlib')
class InspectLoaderTests(unittest.TestCase):
"""Tests for InspectLoader methods for BuiltinImporter."""
def test_get_code(self):
# There is no code object.
result = machinery.BuiltinImporter.get_code(builtin_util.NAME)
self.assertTrue(result is None)
def test_get_source(self):
# There is no source.
result = machinery.BuiltinImporter.get_source(builtin_util.NAME)
self.assertTrue(result is None)
def test_is_package(self):
# Cannot be a package.
result = machinery.BuiltinImporter.is_package(builtin_util.NAME)
self.assertTrue(not result)
def test_not_builtin(self):
# Modules not built-in should raise ImportError.
for meth_name in ('get_code', 'get_source', 'is_package'):
method = getattr(machinery.BuiltinImporter, meth_name)
with self.assertRaises(ImportError):
method(builtin_util.BAD_NAME)
def test_main():
from test.support import run_unittest
run_unittest(LoaderTests, InspectLoaderTests)
if __name__ == '__main__':
test_main()
| lgpl-3.0 |
ronekko/chainer | chainer/utils/__init__.py | 2 | 1537 | import contextlib
import shutil
import tempfile
import numpy
# import classes and functions
from chainer.utils.array import sum_to # NOQA
from chainer.utils.conv import get_conv_outsize # NOQA
from chainer.utils.conv import get_deconv_outsize # NOQA
from chainer.utils.experimental import experimental # NOQA
from chainer.utils.sparse import CooMatrix # NOQA
from chainer.utils.sparse import to_coo # NOQA
from chainer.utils.walker_alias import WalkerAlias # NOQA
def force_array(x, dtype=None):
# numpy returns a float value (scalar) when the return value of an operator
# is a 0-dimension array.
# We need to convert such a value back to a 0-dimension array because a
# `Function` object needs to return a `numpy.ndarray`.
if numpy.isscalar(x):
if dtype is None:
return numpy.array(x)
else:
return numpy.array(x, dtype)
else:
if dtype is None:
return x
else:
return x.astype(dtype, copy=False)
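# Illustrative examples (not part of the original file):
#   force_array(numpy.float32(3.0)) -> a 0-dim numpy.ndarray wrapping 3.0
#   force_array(numpy.arange(3))    -> the input ndarray, returned unchanged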
def force_type(dtype, value):
if numpy.isscalar(value):
return dtype.type(value)
elif value.dtype != dtype:
return value.astype(dtype, copy=False)
else:
return value
@contextlib.contextmanager
def tempdir(**kwargs):
# A context manager that defines a lifetime of a temporary directory.
ignore_errors = kwargs.pop('ignore_errors', False)
temp_dir = tempfile.mkdtemp(**kwargs)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir, ignore_errors=ignore_errors)
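# Illustrative usage (an assumption, not part of the original file):
#   with tempdir(prefix='chainer_') as path:
#       ...  # work inside `path`; the directory is removed on exit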
| mit |
dharmabumstead/ansible | lib/ansible/modules/utilities/logic/import_role.py | 2 | 2570 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'
}
DOCUMENTATION = '''
---
author: Ansible Core Team (@ansible)
module: import_role
short_description: Import a role into a play
description:
- Much like the `roles:` keyword, this task loads a role, but it allows you to control when the role tasks run in
between other tasks of the play.
- Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
you want the opposite behavior, use M(include_role) instead. To better understand the difference you can read
the L(Including and Importing Guide,../user_guide/playbooks_reuse_includes.html).
version_added: "2.4"
options:
name:
description:
- The name of the role to be executed.
required: True
tasks_from:
description:
- File to load from a role's C(tasks/) directory.
default: main
vars_from:
description:
- File to load from a role's C(vars/) directory.
default: main
defaults_from:
description:
- File to load from a role's C(defaults/) directory.
default: main
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
type: bool
default: 'yes'
private:
description:
- If C(yes) the variables from C(defaults/) and C(vars/) in a role will not be made available to the rest of the
play.
type: bool
notes:
- Handlers are made available to the whole play.
'''
EXAMPLES = """
- hosts: all
tasks:
- import_role:
name: myrole
- name: Run tasks/other.yaml instead of 'main'
import_role:
name: myrole
tasks_from: other
- name: Pass variables to role
import_role:
name: myrole
vars:
rolevar1: value from task
- name: Apply loop to each task in role
import_role:
name: myrole
with_items:
- '{{ roleinput1 }}'
- '{{ roleinput2 }}'
loop_control:
loop_var: roleinputvar
- name: Apply condition to each task in role
import_role:
name: myrole
when: not idontwanttorun
"""
RETURN = """
# This module does not return anything except tasks to execute.
"""
| gpl-3.0 |
dan82840/Netgear-RBR50 | git_home/linux.git/tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device behaviour.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only things related to the specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval (msec) from src (nsec) to dst (nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
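# For example (illustrative values): diff_msec(1000000, 3500000) == 2.5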
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be shown
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include a NET_RX softirq, drop it.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
bbiskup/cpp_benchmarks | vendor/google/googletest/googletest/scripts/upload.py | 24 | 52037 | #!/usr/bin/env python
#
# Copyright 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
try:
import readline
except ImportError:
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
"""Print an error message to stderr and exit."""
print >>sys.stderr, msg
sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see https://developers.google.com/identity/protocols/AuthForInstalledApps).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401:
self._Authenticate()
## elif e.code >= 500 and e.code < 600:
## # Server Error - try again.
## continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" %
self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of upload.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
dest="assume_yes", default=False,
help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
dest="verbose", default=1,
help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
default="codereview.appspot.com",
metavar="SERVER",
help=("The server to upload to. The format is host[:port]. "
"Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
metavar="EMAIL", default=None,
help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
metavar="HOST", default=None,
help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
dest="save_cookies", default=True,
help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
metavar="DESCRIPTION", default=None,
help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
dest="description_file", metavar="DESCRIPTION_FILE",
default=None,
help="Optional path of a file that contains "
"the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
metavar="REVIEWERS", default=None,
help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
metavar="CC", default=None,
help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
metavar="MESSAGE", default=None,
help="A message to identify the patch. "
"Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
metavar="ISSUE", default=None,
help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
dest="download_base", default=False,
help="Base files will be downloaded by the server "
"(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
metavar="REV", default=None,
help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
dest="send_mail", default=False,
help="Send notification email to reviewers.")
def GetRpcServer(options):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = options.email
if email is None:
email = GetEmail("Email (login for uploading to %s)" % options.server)
password = getpass.getpass("Password for %s: " % email)
return (email, password)
# If this is the dev_appserver, use fake authentication.
host = (options.host or options.server).lower()
if host == "localhost" or host.startswith("localhost:"):
email = options.email
if email is None:
email = "[email protected]"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
options.server,
lambda: (email, "password"),
host_override=options.host,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=options.save_cookies)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
return rpc_server_class(options.server, GetUserCredentials,
host_override=options.host,
save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
"""Encode form fields for multipart/form-data.
Args:
fields: A sequence of (name, value) elements for regular form fields.
files: A sequence of (name, filename, value) elements for data to be
uploaded as files.
Returns:
(content_type, body) ready for httplib.HTTP instance.
Source:
https://web.archive.org/web/20160116052001/code.activestate.com/recipes/146306
"""
BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for (key, value) in fields:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
for (key, filename, value) in files:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename))
lines.append('Content-Type: %s' % GetContentType(filename))
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
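# Example (hypothetical values): the call below yields a multipart body whose
# parts are separated by BOUNDARY, plus the matching Content-Type header:
#   ctype, body = EncodeMultipartFormData([("subject", "Fix typo")],
#                                         [("data", "data.diff", "Index: a.c\n...")])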
def GetContentType(filename):
"""Helper to guess the content-type from the filename."""
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
print_output=False):
data, retcode = RunShellWithReturnCode(command, print_output,
universal_newlines)
if retcode:
ErrorExit("Got error status from %s:\n%s" % (command, data))
if not silent_ok and not data:
ErrorExit("No output from %s" % command)
return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5.new(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (output for start rev and end rev).
self.svnls_cache = {}
# SVN base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "[email protected]":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (https://reviews.reviewboard.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
def GetStatus(self, filename):
"""Returns the status of a file."""
if not self.options.revision:
status = RunShell(["svn", "status", "--ignore-externals", filename])
if not status:
ErrorExit("svn status returned no output for %s" % filename)
status_lines = status.splitlines()
# If file is in a cl, the output will begin with
# "\n--- Changelist 'cl_name':\n". See
# https://web.archive.org/web/20090918234815/svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
if (len(status_lines) == 3 and
not status_lines[0] and
status_lines[1].startswith("--- Changelist")):
status = status_lines[2]
else:
status = status_lines[0]
# If we have a revision to diff against we need to run "svn list"
# for the old and the new revision and compare the results to get
# the correct status for a file.
else:
dirname, relfilename = os.path.split(filename)
if dirname not in self.svnls_cache:
cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to get status for %s." % filename)
old_files = out.splitlines()
args = ["svn", "list"]
if self.rev_end:
args += ["-r", self.rev_end]
cmd = args + [dirname or "."]
out, returncode = RunShellWithReturnCode(cmd)
if returncode:
ErrorExit("Failed to run command %s" % cmd)
self.svnls_cache[dirname] = (old_files, out.splitlines())
old_files, new_files = self.svnls_cache[dirname]
if relfilename in old_files and relfilename not in new_files:
status = "D "
elif relfilename in old_files and relfilename in new_files:
status = "M "
else:
status = "A "
return status
def GetBaseFile(self, filename):
status = self.GetStatus(filename)
base_content = None
new_content = None
# If a file is copied its status will be "A +", which signifies
# "addition-with-history". See "svn st" for more information. We need to
# upload the original file or else diff parsing will fail if the file was
# edited.
if status[0] == "A" and status[3] != "+":
# We'll need to upload the new content if we're adding a binary file
# since diff's output won't contain it.
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
silent_ok=True)
base_content = ""
is_binary = mimetype and not mimetype.startswith("text/")
if is_binary and self.IsImage(filename):
new_content = self.ReadFile(filename)
elif (status[0] in ("M", "D", "R") or
(status[0] == "A" and status[3] == "+") or # Copied file.
(status[0] == " " and status[1] == "M")): # Property change.
args = []
if self.options.revision:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
# Don't change filename, it's needed later.
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
mimetype, returncode = RunShellWithReturnCode(cmd)
if returncode:
# File does not exist in the requested revision.
# Reset mimetype, it contains an error message.
mimetype = ""
get_base = False
is_binary = mimetype and not mimetype.startswith("text/")
if status[0] == " ":
# Empty base content just to force an upload.
base_content = ""
elif is_binary:
if self.IsImage(filename):
get_base = True
if status[0] == "M":
if not self.rev_end:
new_content = self.ReadFile(filename)
else:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
new_content = RunShell(["svn", "cat", url],
universal_newlines=True, silent_ok=True)
else:
base_content = ""
else:
get_base = True
if get_base:
if is_binary:
universal_newlines = False
else:
universal_newlines = True
if self.rev_start:
# "svn cat -r REV delete_file.txt" doesn't work. cat requires
# the full URL with "@REV" appended instead of using "-r" option.
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
base_content = RunShell(["svn", "cat", url],
universal_newlines=universal_newlines,
silent_ok=True)
else:
base_content = RunShell(["svn", "cat", filename],
universal_newlines=universal_newlines,
silent_ok=True)
if not is_binary:
args = []
if self.rev_start:
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
else:
url = filename
args += ["-r", "BASE"]
cmd = ["svn"] + args + ["propget", "svn:keywords", url]
keywords, returncode = RunShellWithReturnCode(cmd)
if keywords and not returncode:
base_content = self._CollapseKeywords(base_content, keywords)
else:
StatusUpdate("svn status returned unexpected output: %s" % status)
sys.exit(1)
return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Git."""
def __init__(self, options):
super(GitVCS, self).__init__(options)
# Map of filename -> hash of base file.
self.base_hashes = {}
def GenerateDiff(self, extra_args):
# This is more complicated than svn's GenerateDiff because we must convert
# the diff output to include an svn-style "Index:" line as well as record
# the hashes of the base files, so we can upload them along with our diff.
if self.options.revision:
extra_args = [self.options.revision] + extra_args
gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
svndiff = []
filecount = 0
filename = None
for line in gitdiff.splitlines():
match = re.match(r"diff --git a/(.*) b/.*$", line)
if match:
filecount += 1
filename = match.group(1)
svndiff.append("Index: %s\n" % filename)
else:
# The "index" line in a git diff looks like this (long hashes elided):
# index 82c0d44..b2cee3f 100755
# We want to save the left hash, as that identifies the base file.
match = re.match(r"index (\w+)\.\.", line)
if match:
self.base_hashes[filename] = match.group(1)
svndiff.append(line + "\n")
if not filecount:
ErrorExit("No valid patches found in output from git diff")
return "".join(svndiff)
def GetUnknownFiles(self):
status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
silent_ok=True)
return status.splitlines()
def GetBaseFile(self, filename):
hash = self.base_hashes[filename]
base_content = None
new_content = None
is_binary = False
if hash == "0" * 40: # All-zero hash indicates no base file.
status = "A"
base_content = ""
else:
status = "M"
base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
if returncode:
ErrorExit("Got error status from 'git show %s'" % hash)
return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Mercurial."""
def __init__(self, options, repo_dir):
super(MercurialVCS, self).__init__(options)
# Absolute path to repository (we can be in a subdir)
self.repo_dir = os.path.normpath(repo_dir)
# Compute the subdir
cwd = os.path.normpath(os.getcwd())
assert cwd.startswith(self.repo_dir)
self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
if self.options.revision:
self.base_rev = self.options.revision
else:
self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()
def _GetRelPath(self, filename):
"""Get relative path of a file according to the current directory,
given its logical path in the repo."""
assert filename.startswith(self.subdir), filename
return filename[len(self.subdir):].lstrip(r"\/")
def GenerateDiff(self, extra_args):
# If no file specified, restrict to the current subdir
extra_args = extra_args or ["."]
cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
data = RunShell(cmd, silent_ok=True)
svndiff = []
filecount = 0
for line in data.splitlines():
m = re.match("diff --git a/(\S+) b/(\S+)", line)
if m:
# Modify the line to make it look like it comes from svn diff.
# With this modification no changes on the server side are required
# to make upload.py work with Mercurial repos.
# NOTE: for proper handling of moved/copied files, we have to use
# the second filename.
filename = m.group(2)
svndiff.append("Index: %s" % filename)
svndiff.append("=" * 67)
filecount += 1
logging.info(line)
else:
svndiff.append(line)
if not filecount:
ErrorExit("No valid patches found in output from hg diff")
return "\n".join(svndiff) + "\n"
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
args = []
status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
silent_ok=True)
unknown_files = []
for line in status.splitlines():
st, fn = line.split(" ", 1)
if st == "?":
unknown_files.append(fn)
return unknown_files
def GetBaseFile(self, filename):
# "hg status" and "hg cat" both take a path relative to the current subdir
# rather than to the repo root, but "hg diff" has given us the full path
# to the repo root.
base_content = ""
new_content = None
is_binary = False
oldrelpath = relpath = self._GetRelPath(filename)
# "hg status -C" returns two lines for moved/copied files, one otherwise
out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
out = out.splitlines()
# HACK: strip error message about missing file/directory if it isn't in
# the working copy
if out[0].startswith('%s: ' % relpath):
out = out[1:]
if len(out) > 1:
# Moved/copied => considered as modified, use old filename to
# retrieve base contents
oldrelpath = out[1].strip()
status = "M"
else:
status, _ = out[0].split(' ', 1)
if status != "A":
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True)
is_binary = "\0" in base_content # Mercurial's heuristic
if status != "R":
new_content = open(relpath, "rb").read()
is_binary = is_binary or "\0" in new_content
if is_binary and base_content:
# Fetch again without converting newlines
base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
silent_ok=True, universal_newlines=False)
if not is_binary or not self.IsImage(relpath):
new_content = None
return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
"""Splits a patch into separate pieces for each file.
Args:
data: A string containing the output of svn diff.
Returns:
A list of 2-tuple (filename, text) where text is the svn diff output
pertaining to filename.
"""
patches = []
filename = None
diff = []
for line in data.splitlines(True):
new_filename = None
if line.startswith('Index:'):
unused, new_filename = line.split(':', 1)
new_filename = new_filename.strip()
elif line.startswith('Property changes on:'):
unused, temp_filename = line.split(':', 1)
# When a file is modified, paths use '/' between directories, however
# when a property is modified '\' is used on Windows. Make them the same
# otherwise the file shows up twice.
temp_filename = temp_filename.strip().replace('\\', '/')
if temp_filename != filename:
# File has property changes but no modifications, create a new diff.
new_filename = temp_filename
if new_filename:
if filename and diff:
patches.append((filename, ''.join(diff)))
filename = new_filename
diff = [line]
continue
if diff is not None:
diff.append(line)
if filename and diff:
patches.append((filename, ''.join(diff)))
return patches
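# Illustrative example (hypothetical diff text): given two "Index:" sections,
# one for a.c and one for b.c, SplitPatch returns
#   [("a.c", "<diff text for a.c>"), ("b.c", "<diff text for b.c>")].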
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCS(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns an instance of the appropriate class. Exit with an
error if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
try:
out, returncode = RunShellWithReturnCode(["hg", "root"])
if returncode == 0:
return MercurialVCS(options, out.strip())
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have hg installed.
raise
# Subversion has a .svn in all working directories.
if os.path.isdir('.svn'):
logging.info("Guessed VCS = Subversion")
return SubversionVCS(options)
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
try:
out, returncode = RunShellWithReturnCode(["git", "rev-parse",
"--is-inside-work-tree"])
if returncode == 0:
return GitVCS(options)
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have git installed.
raise
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
options, args = parser.parse_args(argv[1:])
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
if isinstance(vcs, SubversionVCS):
# base field is only allowed for Subversion.
# Note: Fetching base files may become deprecated in future releases.
base = vcs.GuessBase(options.download_base)
else:
base = None
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
files = vcs.GetBaseFiles(data)
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.issue:
prompt = "Message describing this patch set: "
else:
prompt = "New issue subject: "
message = options.message or raw_input(prompt).strip()
if not message:
ErrorExit("A non-empty message is required")
rpc_server = GetRpcServer(options)
form_fields = [("subject", message)]
if base:
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
if "@" in cc and not cc.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % cc)
form_fields.append(("cc", options.cc))
description = options.description
if options.description_file:
if options.description:
ErrorExit("Can't specify description and description_file")
file = open(options.description_file, 'r')
description = file.read()
file.close()
if description:
form_fields.append(("description", description))
# Send a hash of all the base files so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5.new(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
# If we're uploading base files, don't send the email before the uploads, so
# that it contains the file status.
if options.send_mail and options.download_base:
form_fields.append(("send_mail", "1"))
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
if options.send_mail:
rpc_server.Send("/" + issue + "/mail", payload="")
return issue, patchset
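# Illustrative sketch (not part of the original script): RealMain() can also be
# driven programmatically with a pre-built diff, per its docstring above. The
# server name and diff file below are placeholders; only the -s/--server flag
# is referenced elsewhere in this script.
#
#   diff_text = open("change.diff").read()
#   issue, patchset = RealMain(["upload.py", "-s", "codereview.example.com"],
#                              data=diff_text)
#   print "Uploaded issue %s (patchset %s)" % (issue, patchset)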
def main():
try:
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
| mit |
GNOME/orca | test/keystrokes/oowriter/ui_role_combo_box.py | 1 | 2256 | #!/usr/bin/python
"""Test of Orca's presentation of a combo box."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(PauseAction(3000))
sequence.append(TypeAction("This is a test."))
sequence.append(KeyComboAction("Left"))
sequence.append(KeyComboAction("<Control><Shift>Left"))
sequence.append(KeyComboAction("<Alt>o"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Return"))
sequence.append(PauseAction(3000))
sequence.append(KeyComboAction("<Control>Page_Down"))
sequence.append(KeyComboAction("Tab"))
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Move to Strikethrough",
["BRAILLE LINE: 'soffice application Character dialog Font Effects page tab Strikethrough: (Without) combo box'",
" VISIBLE: 'Strikethrough: (Without) combo b', cursor=16",
"SPEECH OUTPUT: 'Strikethrough: (Without) combo box.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Down Arrow",
["KNOWN ISSUE: We seem to be presenting extra context here",
"BRAILLE LINE: 'soffice application Character dialog Font Effects page tab Strikethrough: Single combo box Single list item'",
" VISIBLE: 'Single list item', cursor=1",
"SPEECH OUTPUT: 'Character dialog'",
"SPEECH OUTPUT: 'Font Effects page tab.'",
"SPEECH OUTPUT: 'Single.'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"3. Up Arrow",
["BRAILLE LINE: 'soffice application Character dialog Font Effects page tab Strikethrough: (Without) combo box (Without) list item'",
" VISIBLE: '(Without) list item', cursor=1",
"SPEECH OUTPUT: '(Without)'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| lgpl-2.1 |
mcanthony/rethinkdb | external/v8_3.30.33.16/testing/gtest/scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
"""Returns the preamble for the header file.
Args:
n: the maximum arity of the predicate macros to be generated.
"""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
'n' : n
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email [email protected] if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
"""Returns the English name of the given arity."""
if n < 0:
return None
elif n <= 3:
return ['nullary', 'unary', 'binary', 'ternary'][n]
else:
return '%s-ary' % n
def Title(word):
"""Returns the given word in title case. The difference between
this and string's title() method is that Title('4-ary') is '4-ary'
while '4-ary'.title() is '4-Ary'."""
return word[0].upper() + word[1:]
def OneTo(n):
"""Returns the list [1, 2, 3, ..., n]."""
return range(1, n + 1)
def Iter(n, format, sep=''):
"""Given a positive integer n, a format string that contains 0 or
more '%s' format specs, and optionally a separator string, returns
the join of n strings, each formatted with the format string on an
iterator ranged from 1 to n.
Example:
Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
"""
# How many '%s' specs are in format?
spec_count = len(format.split('%s')) - 1
return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
def ImplementationForArity(n):
"""Returns the implementation of n-ary predicate assertions."""
  # A map that defines the values used in the implementation template.
DEFS = {
'n' : str(n),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'arity' : Arity(n),
'Arity' : Title(Arity(n))
}
impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
impl += Iter(n, """,
typename T%s""")
impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
impl += Iter(n, """,
const char* e%s""")
impl += """,
Pred pred"""
impl += Iter(n, """,
const T%s& v%s""")
impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
impl += ' return AssertionFailure() << pred_text << "("'
impl += Iter(n, """
<< e%s""", sep=' << ", "')
impl += ' << ") evaluates to false, where"'
impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
impl += Iter(n, """, \\
#v%s""")
impl += """, \\
pred"""
impl += Iter(n, """, \\
v%s""")
impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
return impl
def HeaderPostamble():
"""Returns the postamble for the header file."""
return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string, overwrites it with the
given content."""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
"""Given the maximum arity n, updates the header file that implements
the predicate assertions."""
GenerateFile(HEADER,
HeaderPreamble(n)
+ ''.join([ImplementationForArity(i) for i in OneTo(n)])
+ HeaderPostamble())
def UnitTestPreamble():
"""Returns the preamble for the unit test file."""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the tests for n-ary predicate assertions."""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problems with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
# Generates tests for all 2**6 = 64 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
"""Returns the postamble for the tests."""
return ''
def GenerateUnitTest(n):
"""Returns the tests for up-to n-ary predicate assertions."""
GenerateFile(UNIT_TEST,
UnitTestPreamble()
+ ''.join([TestsForArity(i) for i in OneTo(n)])
+ UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
if __name__ == '__main__':
_Main()
| agpl-3.0 |
mayragomez/Chapter05_Tigram | plugins/ti.alloy/plugin.py | 1729 | 5251 | import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
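# Minimal usage sketch (the command line is illustrative): a non-zero exit
# status makes the backport above raise subprocess.CalledProcessError with the
# captured output attached to the exception.
#
#   try:
#       out = check_output(["alloy", "--version"])
#   except subprocess.CalledProcessError as err:
#       print "[ERROR] alloy exited with %d: %s" % (err.returncode, err.output)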
def compile(config):
paths = {}
binaries = ["alloy","node"]
dotAlloy = os.path.abspath(os.path.join(config['project_dir'], 'build', '.alloynewcli'))
if os.path.exists(dotAlloy):
print "[DEBUG] build/.alloynewcli file found, skipping plugin..."
os.remove(dotAlloy)
else:
for binary in binaries:
try:
# see if the environment variable is defined
paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
except KeyError as ex:
# next try PATH, and then our guess paths
if sys.platform == "darwin" or sys.platform.startswith('linux'):
userPath = os.environ["HOME"]
guessPaths = [
"/usr/local/bin/"+binary,
"/opt/local/bin/"+binary,
userPath+"/local/bin/"+binary,
"/opt/bin/"+binary,
"/usr/bin/"+binary,
"/usr/local/share/npm/bin/"+binary
]
try:
binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
except:
print "[WARN] Couldn't find %s on your PATH:" % binary
print "[WARN] %s" % os.environ["PATH"]
print "[WARN]"
print "[WARN] Checking for %s in a few default locations:" % binary
for p in guessPaths:
sys.stdout.write("[WARN] %s -> " % p)
if os.path.exists(p):
binaryPath = p
print "FOUND"
break
else:
print "not found"
binaryPath = None
if binaryPath is None:
print "[ERROR] Couldn't find %s" % binary
sys.exit(1)
else:
paths[binary] = binaryPath
# no guesses on windows, just use the PATH
elif sys.platform == "win32":
paths["alloy"] = "alloy.cmd"
f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
if os.path.exists(f):
print "[INFO] alloy app found at %s" % f
rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
devicefamily = 'none'
simtype = 'none'
version = '0'
deploytype = 'development'
if config['platform']==u'ios':
version = config['iphone_version']
devicefamily = config['devicefamily']
deploytype = config['deploytype']
if config['platform']==u'android':
builder = config['android_builder']
version = builder.tool_api_level
deploytype = config['deploy_type']
if config['platform']==u'mobileweb':
builder = config['mobileweb_builder']
deploytype = config['deploytype']
cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
if sys.platform == "win32":
cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
else:
cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
print "[INFO] Executing Alloy compile:"
print "[INFO] %s" % " ".join(cmd)
try:
print check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if hasattr(ex, 'output'):
print ex.output
print "[ERROR] Alloy compile failed"
retcode = 1
if hasattr(ex, 'returncode'):
retcode = ex.returncode
sys.exit(retcode)
except EnvironmentError as ex:
print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
sys.exit(2)
| apache-2.0 |
arnaud-morvan/QGIS | python/plugins/processing/gui/matrixmodelerwidget.py | 11 | 5263 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MatrixModelerWidget.py
---------------------
Date : May 2018
Copyright : (C) 2018 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'May 2018'
__copyright__ = '(C) 2018, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import warnings
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
from qgis.PyQt.QtWidgets import QInputDialog, QMessageBox
from qgis.core import QgsApplication
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'matrixmodelerwidgetbase.ui'))
class MatrixModelerWidget(BASE, WIDGET):
def __init__(self, parent=None):
super(MatrixModelerWidget, self).__init__(parent)
self.setupUi(self)
self.btnAddColumn.setIcon(QgsApplication.getThemeIcon('/mActionNewAttribute.svg'))
self.btnRemoveColumn.setIcon(QgsApplication.getThemeIcon('/mActionDeleteAttribute.svg'))
self.btnAddRow.setIcon(QgsApplication.getThemeIcon('/symbologyAdd.svg'))
self.btnRemoveRow.setIcon(QgsApplication.getThemeIcon('/symbologyRemove.svg'))
self.btnClear.setIcon(QgsApplication.getThemeIcon('console/iconClearConsole.svg'))
self.btnAddColumn.clicked.connect(self.addColumn)
self.btnRemoveColumn.clicked.connect(self.removeColumns)
self.btnAddRow.clicked.connect(self.addRow)
self.btnRemoveRow.clicked.connect(self.removeRows)
self.btnClear.clicked.connect(self.clearTable)
items = [QStandardItem('0')]
model = QStandardItemModel()
model.appendColumn(items)
self.tblView.setModel(model)
self.tblView.horizontalHeader().sectionDoubleClicked.connect(self.changeHeader)
def addColumn(self):
model = self.tblView.model()
items = [QStandardItem('0') for i in range(model.rowCount())]
model.appendColumn(items)
def removeColumns(self):
indexes = sorted(self.tblView.selectionModel().selectedColumns())
self.tblView.setUpdatesEnabled(False)
for i in reversed(indexes):
self.tblView.model().removeColumns(i.column(), 1)
self.tblView.setUpdatesEnabled(True)
def addRow(self):
model = self.tblView.model()
items = [QStandardItem('0') for i in range(model.columnCount())]
model.appendRow(items)
def removeRows(self):
indexes = sorted(self.tblView.selectionModel().selectedRows())
self.tblView.setUpdatesEnabled(False)
for i in reversed(indexes):
self.tblView.model().removeRows(i.row(), 1)
self.tblView.setUpdatesEnabled(True)
def clearTable(self, removeAll=False):
        res = QMessageBox.question(self, self.tr('Clear?'), self.tr('Are you sure you want to clear the table?'))
if res == QMessageBox.Yes:
self.tblView.model().clear()
def changeHeader(self, index):
txt, ok = QInputDialog.getText(self, self.tr("Enter column name"), self.tr("Column name"))
if ok:
self.tblView.model().setHeaderData(index, Qt.Horizontal, txt)
def value(self):
cols = self.tblView.model().columnCount()
rows = self.tblView.model().rowCount()
items = []
for row in range(rows):
for col in range(cols):
items.append(str(self.tblView.model().item(row, col).text()))
return items
def setValue(self, headers, table):
model = self.tblView.model()
model.setHorizontalHeaderLabels(headers)
cols = len(headers)
rows = len(table) // cols
model = QStandardItemModel(rows, cols)
for row in range(rows):
for col in range(cols):
item = QStandardItem(str(table[row * cols + col]))
model.setItem(row, col, item)
self.tblView.setModel(model)
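    # Example of the flat layout shared by setValue() and value() (numbers are
    # arbitrary): headers=['min', 'max'] with table=['0', '10', '5', '20']
    # fills a 2x2 grid row by row, because rows = len(table) // len(headers).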
def headers(self):
headers = []
model = self.tblView.model()
for i in range(model.columnCount()):
headers.append(str(model.headerData(i, Qt.Horizontal)))
return headers
def fixedRows(self):
return self.chkFixedRows.isChecked()
def setFixedRows(self, fixedRows):
self.chkFixedRows.setChecked(fixedRows)
| gpl-2.0 |
stephen144/odoo | openerp/addons/base/ir/ir_values.py | 8 | 25408 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import tools
from openerp.osv import osv, fields
from openerp.exceptions import AccessError, MissingError
from openerp.tools.translate import _
from openerp.tools import pickle
EXCLUDED_FIELDS = set((
'report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml',
'report_sxw_content_data', 'report_rml_content_data', 'search_view', ))
#: Possible slots to bind an action to with :meth:`~.set_action`
ACTION_SLOTS = [
"client_action_multi", # sidebar wizard action
"client_print_multi", # sidebar report printing button
"client_action_relate", # sidebar related link
"tree_but_open", # double-click on item in tree view
"tree_but_action", # deprecated: same as tree_but_open
]
class ir_values(osv.osv):
"""Holds internal model-specific action bindings and user-defined default
       field value definitions. This is a legacy internal model, mixing
two different concepts, and will likely be updated or replaced in a
future version by cleaner, separate models. You should not depend
explicitly on it.
The purpose of each ``ir.values`` entry depends on its type, defined
by the ``key`` column:
* 'default': user-defined default values, used when creating new
records of this model:
* 'action': binding of an action to a particular *action slot* of
this model, making the action easily available in the user
interface for this model.
The ``key2`` column acts as a qualifier, further refining the type
of the entry. The possible values are:
* for 'default' entries: an optional condition restricting the
cases where this particular default value will be applicable,
or ``False`` for no condition
* for 'action' entries: the ``key2`` qualifier is one of the available
action slots, defining how this action can be invoked:
* ``'client_print_multi'`` for report printing actions that will
be available on views displaying items from this model
* ``'client_action_multi'`` for assistants (wizards) actions
that will be available in views displaying objects of this model
* ``'client_action_relate'`` for links towards related documents
that should be available in views displaying objects of this model
* ``'tree_but_open'`` for actions that will be triggered when
double-clicking an item from this model in a hierarchical tree view
Each entry is specific to a model (``model`` column), and for ``'actions'``
type, may even be made specific to a given record of that model when the
``res_id`` column contains a record ID (``False`` means it's global for
all records).
The content of the entry is defined by the ``value`` column, which may either
contain an arbitrary value, or a reference string defining the action that
should be executed.
.. rubric:: Usage: default values
The ``'default'`` entries are usually defined manually by the
users, and set by their UI clients calling :meth:`~.set_default`.
These default values are then automatically used by the
ORM every time a new record is about to be created, i.e. when
:meth:`~openerp.osv.osv.osv.default_get`
or :meth:`~openerp.osv.osv.osv.create` are called.
.. rubric:: Usage: action bindings
Business applications will usually bind their actions during
installation, and OpenERP UI clients will apply them as defined,
based on the list of actions included in the result of
:meth:`~openerp.osv.osv.osv.fields_view_get`,
or directly returned by explicit calls to :meth:`~.get_actions`.
"""
_name = 'ir.values'
def _value_unpickle(self, cursor, user, ids, name, arg, context=None):
res = {}
for record in self.browse(cursor, user, ids, context=context):
value = record[name[:-9]]
if record.key == 'default' and value:
# default values are pickled on the fly
try:
value = str(pickle.loads(value))
except Exception:
pass
res[record.id] = value
return res
def _value_pickle(self, cursor, user, id, name, value, arg, context=None):
if context is None:
context = {}
ctx = context.copy()
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
record = self.browse(cursor, user, id, context=context)
if record.key == 'default':
# default values are pickled on the fly
value = pickle.dumps(value)
self.write(cursor, user, id, {name[:-9]: value}, context=ctx)
def onchange_object_id(self, cr, uid, ids, object_id, context=None):
if not object_id: return {}
act = self.pool.get('ir.model').browse(cr, uid, object_id, context=context)
return {
'value': {'model': act.model}
}
def onchange_action_id(self, cr, uid, ids, action_id, context=None):
if not action_id: return {}
act = self.pool.get('ir.actions.actions').browse(cr, uid, action_id, context=context)
return {
'value': {'value_unpickle': act.type+','+str(act.id)}
}
_columns = {
'name': fields.char('Name', required=True),
'model': fields.char('Model Name', select=True, required=True,
help="Model to which this entry applies"),
# TODO: model_id and action_id should be read-write function fields
'model_id': fields.many2one('ir.model', 'Model (change only)', size=128,
help="Model to which this entry applies - "
"helper field for setting a model, will "
"automatically set the correct model name"),
'action_id': fields.many2one('ir.actions.actions', 'Action (change only)',
help="Action bound to this entry - "
"helper field for binding an action, will "
"automatically set the correct reference"),
'value': fields.text('Value', help="Default value (pickled) or reference to an action"),
'value_unpickle': fields.function(_value_unpickle, fnct_inv=_value_pickle,
type='text',
string='Default value or action reference'),
'key': fields.selection([('action','Action'),('default','Default')],
'Type', select=True, required=True,
help="- Action: an action attached to one slot of the given model\n"
"- Default: a default value for a model field"),
'key2' : fields.char('Qualifier', select=True,
help="For actions, one of the possible action slots: \n"
" - client_action_multi\n"
" - client_print_multi\n"
" - client_action_relate\n"
" - tree_but_open\n"
"For defaults, an optional condition"
,),
'res_id': fields.integer('Record ID', select=True,
help="Database identifier of the record to which this applies. "
"0 = for all records"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', select=True,
help="If set, action binding only applies for this user."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', select=True,
help="If set, action binding only applies for this company")
}
_defaults = {
'key': 'action',
'key2': 'tree_but_open',
}
def _auto_init(self, cr, context=None):
super(ir_values, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_values_key_model_key2_res_id_user_id_idx\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_values_key_model_key2_res_id_user_id_idx ON ir_values (key, model, key2, res_id, user_id)')
def create(self, cr, uid, vals, context=None):
res = super(ir_values, self).create(cr, uid, vals, context=context)
self.clear_caches()
return res
def write(self, cr, uid, ids, vals, context=None):
res = super(ir_values, self).write(cr, uid, ids, vals, context=context)
self.clear_caches()
return res
def unlink(self, cr, uid, ids, context=None):
res = super(ir_values, self).unlink(cr, uid, ids, context=context)
self.clear_caches()
return res
def set_default(self, cr, uid, model, field_name, value, for_all_users=True, company_id=False, condition=False):
"""Defines a default value for the given model and field_name. Any previous
           default for the same scope (model, field_name, for_all_users, company_id, condition)
will be replaced and lost in the process.
Defaults can be later retrieved via :meth:`~.get_defaults`, which will return
the highest priority default for any given field. Defaults that are more specific
have a higher priority, in the following order (highest to lowest):
* specific to user and company
* specific to user only
* specific to company only
* global to everyone
:param string model: model name
:param string field_name: field name to which the default applies
:param value: the default field value to set
:type value: any serializable Python value
:param bool for_all_users: whether the default should apply to everybody or only
the user calling the method
:param int company_id: optional ID of the company to which the default should
apply. If omitted, the default will be global. If True
is passed, the current user's company will be used.
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: id of the newly created ir.values entry
"""
if isinstance(value, unicode):
value = value.encode('utf8')
if company_id is True:
# should be company-specific, need to get company id
user = self.pool.get('res.users').browse(cr, uid, uid)
company_id = user.company_id.id
# remove existing defaults for the same scope
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'name': field_name,
'value': pickle.dumps(value),
'model': model,
'key': 'default',
'key2': condition and condition[:200],
'user_id': False if for_all_users else uid,
'company_id': company_id,
})
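    # Illustrative call (model, field and condition are placeholders, not part
    # of this module): store a per-user default that only applies when the
    # client sends the matching condition string.
    #
    #   self.pool['ir.values'].set_default(cr, uid, 'res.partner', 'lang',
    #       'en_US', for_all_users=False, condition='country_id=21')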
def get_default(self, cr, uid, model, field_name, for_all_users=True, company_id=False, condition=False):
""" Return the default value defined for model, field_name, users, company and condition.
Return ``None`` if no such default exists.
"""
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
defaults = self.browse(cr, uid, self.search(cr, uid, search_criteria))
return pickle.loads(defaults[0].value.encode('utf-8')) if defaults else None
def get_defaults(self, cr, uid, model, condition=False):
"""Returns any default values that are defined for the current model and user,
(and match ``condition``, if specified), previously registered via
:meth:`~.set_default`.
Defaults are global to a model, not field-specific, but an optional
``condition`` can be provided to restrict matching default values
to those that were defined for the same condition (usually based
on another field's value).
Default values also have priorities depending on whom they apply
to: only the highest priority value will be returned for any
field. See :meth:`~.set_default` for more details.
:param string model: model name
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: list of default values tuples of the form ``(id, field_name, value)``
(``id`` is the ID of the default entry, usually irrelevant)
"""
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
LEFT JOIN res_users u ON (v.user_id = u.id)
WHERE v.key = %%s AND v.model = %%s
AND (v.user_id = %%s OR v.user_id IS NULL)
AND (v.company_id IS NULL OR
v.company_id =
(SELECT company_id from res_users where id = %%s)
)
%s
ORDER BY v.user_id, u.company_id"""
params = ('default', model, uid, uid)
if condition:
query %= 'AND v.key2 = %s'
params += (condition[:200],)
else:
query %= 'AND v.key2 is NULL'
cr.execute(query, params)
# keep only the highest priority default for each field
defaults = {}
for row in cr.dictfetchall():
defaults.setdefault(row['name'],
(row['id'], row['name'], pickle.loads(row['value'].encode('utf-8'))))
return defaults.values()
# use ormcache: this is called a lot by BaseModel.default_get()!
@tools.ormcache('uid', 'model', 'condition')
def get_defaults_dict(self, cr, uid, model, condition=False):
""" Returns a dictionary mapping field names with their corresponding
default value. This method simply improves the returned value of
:meth:`~.get_defaults`.
"""
return dict((f, v) for i, f, v in self.get_defaults(cr, uid, model, condition))
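    # Sketch of a typical result (field names and values are placeholders):
    # get_defaults_dict(cr, uid, 'res.partner') might return
    # {'lang': 'en_US', 'customer': True} - one entry per field that has a
    # matching 'default' record, keeping only the highest-priority value.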
def set_action(self, cr, uid, name, action_slot, model, action, res_id=False):
"""Binds an the given action to the given model's action slot - for later
retrieval via :meth:`~.get_actions`. Any existing binding of the same action
to the same slot is first removed, allowing an update of the action's name.
See the class description for more details about the various action
slots: :class:`~ir_values`.
:param string name: action label, usually displayed by UI client
:param string action_slot: the action slot to which the action should be
bound to - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param string action: action reference, in the form ``'model,id'``
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: id of the newly created ir.values entry
"""
assert isinstance(action, basestring) and ',' in action, \
'Action definition must be an action reference, e.g. "ir.actions.act_window,42"'
assert action_slot in ACTION_SLOTS, \
'Action slot (%s) must be one of: %r' % (action_slot, ACTION_SLOTS)
# remove existing action definition of same slot and value
search_criteria = [
('key', '=', 'action'),
('key2', '=', action_slot),
('model', '=', model),
('res_id', '=', res_id or 0), # int field -> NULL == 0
('value', '=', action),
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'key': 'action',
'key2': action_slot,
'model': model,
'res_id': res_id,
'name': name,
'value': action,
})
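    # Illustrative binding (the label, model and action reference are
    # placeholders in the same spirit as the assertion message above):
    #
    #   self.pool['ir.values'].set_action(cr, uid, 'Print badge',
    #       action_slot='client_print_multi', model='res.partner',
    #       action='ir.actions.report.xml,42')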
@tools.ormcache_context('uid', 'action_slot', 'model', 'res_id', keys=('lang',))
def get_actions(self, cr, uid, action_slot, model, res_id=False, context=None):
"""Retrieves the list of actions bound to the given model's action slot.
See the class description for more details about the various action
slots: :class:`~.ir_values`.
:param string action_slot: the action slot to which the actions should be
bound to - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: list of action tuples of the form ``(id, name, action_def)``,
where ``id`` is the ID of the default entry, ``name`` is the
action label, and ``action_def`` is a dict containing the
action definition as obtained by calling
:meth:`~openerp.osv.osv.osv.read` on the action record.
"""
assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
WHERE v.key = %s AND v.key2 = %s
AND v.model = %s
AND (v.res_id = %s
OR v.res_id IS NULL
OR v.res_id = 0)
ORDER BY v.id"""
cr.execute(query, ('action', action_slot, model, res_id or None))
# map values to their corresponding action record
actions = []
for id, name, value in cr.fetchall():
if not value:
continue # skip if undefined
action_model, action_id = value.split(',')
if action_model not in self.pool:
continue # unknown model? skip it!
action = self.pool[action_model].browse(cr, uid, int(action_id), context)
actions.append((id, name, action))
# process values and their action
user = self.pool['res.users'].browse(cr, uid, uid, context)
results = {}
for id, name, action in actions:
fields = [field for field in action._fields if field not in EXCLUDED_FIELDS]
# FIXME: needs cleanup
try:
action_def = {
field: action._fields[field].convert_to_read(action[field])
for field in fields
}
if action._name in ('ir.actions.report.xml', 'ir.actions.act_window'):
if action.groups_id and not action.groups_id & user.groups_id:
if name == 'Menuitem':
raise AccessError(_('You do not have the permission to perform this operation!!!'))
continue
# keep only the last action registered for each action name
results[name] = (id, name, action_def)
except (AccessError, MissingError):
continue
return sorted(results.values())
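    # Illustrative lookup (slot and model names echo the docstring above):
    #
    #   bindings = self.pool['ir.values'].get_actions(
    #       cr, uid, 'tree_but_open', 'res.partner')
    #   for value_id, label, action_def in bindings:
    #       pass  # action_def is the read() dict of the bound action record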
def _map_legacy_model_list(self, model_list, map_fn, merge_results=False):
"""Apply map_fn to the various models passed, according to
legacy way to specify models/records.
"""
assert isinstance(model_list, (list, tuple)), \
"model_list should be in the form [model,..] or [(model,res_id), ..]"
results = []
for model in model_list:
res_id = False
if isinstance(model, (list, tuple)):
model, res_id = model
result = map_fn(model, res_id)
# some of the functions return one result at a time (tuple or id)
# and some return a list of many of them - care for both
if merge_results:
results.extend(result)
else:
results.append(result)
return results
    # Backwards-compatibility adapter layer to retrofit into split API
def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
"""Deprecated legacy method to set default values and bind actions to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.set_default`
(``key=='default'``) or :meth:`~.set_action` (``key == 'action'``).
:deprecated: As of v6.1, ``set_default()`` or ``set_action()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_set(model,res_id):
return self.set_default(cr, uid, model, field_name=name, value=value,
for_all_users=(not preserve_user), company_id=company,
condition=key2)
elif key == 'action':
def do_set(model,res_id):
return self.set_action(cr, uid, name, action_slot=key2, model=model, action=value, res_id=res_id)
return self._map_legacy_model_list(models, do_set)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
"""Deprecated legacy method to get the list of default values or actions bound to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.get_defaults`
(``key=='default'``) or :meth:`~.get_actions` (``key == 'action'``)
:deprecated: As of v6.1, ``get_defaults()`` or ``get_actions()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_get(model,res_id):
return self.get_defaults(cr, uid, model, condition=key2)
elif key == 'action':
def do_get(model,res_id):
return self.get_actions(cr, uid, action_slot=key2, model=model, res_id=res_id, context=context)
return self._map_legacy_model_list(models, do_get, merge_results=True)
| agpl-3.0 |
SimVascular/VTK | Filters/Hybrid/Testing/Python/TestPCA.py | 20 | 8348 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This example shows how to visualise the variation in shape in a set of objects using
# vtkPCAAnalysisFilter.
#
# We make three ellipsoids by distorting and translating a sphere and then align them together
# using vtkProcrustesAlignmentFilter, and then pass the output to vtkPCAAnalysisFilter. We visualise
# the first and second modes - the major sources of variation that were in the training set.
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(36)
sphere.SetThetaResolution(36)
sphere.Update()
# make two copies of the shape and distort them a little
transform1 = vtk.vtkTransform()
transform1.Translate(0.2,0.1,0.3)
transform1.Scale(1.3,1.1,0.8)
transform2 = vtk.vtkTransform()
transform2.Translate(0.3,0.7,0.1)
transform2.Scale(1.0,0.1,1.8)
transformer1 = vtk.vtkTransformPolyDataFilter()
transformer1.SetInputConnection(sphere.GetOutputPort())
transformer1.SetTransform(transform1)
transformer1.Update()
transformer2 = vtk.vtkTransformPolyDataFilter()
transformer2.SetInputConnection(sphere.GetOutputPort())
transformer2.SetTransform(transform2)
transformer2.Update()
#------------------------------------------------------------------
# map these three shapes into the first renderer
#------------------------------------------------------------------
map1a = vtk.vtkPolyDataMapper()
map1a.SetInputConnection(sphere.GetOutputPort())
Actor1a = vtk.vtkActor()
Actor1a.SetMapper(map1a)
Actor1a.GetProperty().SetDiffuseColor(1.0000,0.3882,0.2784)
map1b = vtk.vtkPolyDataMapper()
map1b.SetInputConnection(transformer1.GetOutputPort())
Actor1b = vtk.vtkActor()
Actor1b.SetMapper(map1b)
Actor1b.GetProperty().SetDiffuseColor(0.3882,1.0000,0.2784)
map1c = vtk.vtkPolyDataMapper()
map1c.SetInputConnection(transformer2.GetOutputPort())
Actor1c = vtk.vtkActor()
Actor1c.SetMapper(map1c)
Actor1c.GetProperty().SetDiffuseColor(0.3882,0.2784,1.0000)
#------------------------------------------------------------------
# align the shapes using Procrustes (using SetModeToRigidBody)
# and map the aligned shapes into the second renderer
#------------------------------------------------------------------
group = vtk.vtkMultiBlockDataGroupFilter()
group.AddInputConnection(sphere.GetOutputPort())
group.AddInputConnection(transformer1.GetOutputPort())
group.AddInputConnection(transformer2.GetOutputPort())
procrustes = vtk.vtkProcrustesAlignmentFilter()
procrustes.SetInputConnection(group.GetOutputPort())
procrustes.GetLandmarkTransform().SetModeToRigidBody()
procrustes.Update()
map2a = vtk.vtkPolyDataMapper()
map2a.SetInputData(procrustes.GetOutput().GetBlock(0))
Actor2a = vtk.vtkActor()
Actor2a.SetMapper(map2a)
Actor2a.GetProperty().SetDiffuseColor(1.0000,0.3882,0.2784)
map2b = vtk.vtkPolyDataMapper()
map2b.SetInputData(procrustes.GetOutput().GetBlock(1))
Actor2b = vtk.vtkActor()
Actor2b.SetMapper(map2b)
Actor2b.GetProperty().SetDiffuseColor(0.3882,1.0000,0.2784)
map2c = vtk.vtkPolyDataMapper()
map2c.SetInputData(procrustes.GetOutput().GetBlock(2))
Actor2c = vtk.vtkActor()
Actor2c.SetMapper(map2c)
Actor2c.GetProperty().SetDiffuseColor(0.3882,0.2784,1.0000)
#------------------------------------------------------------------
# pass the output of Procrustes to vtkPCAAnalysisFilter
#------------------------------------------------------------------
pca = vtk.vtkPCAAnalysisFilter()
pca.SetInputConnection(procrustes.GetOutputPort())
pca.Update()
# we need to call Update because GetParameterisedShape is not
# part of the normal SetInput/GetOutput pipeline
#------------------------------------------------------------------
# map the first mode into the third renderer:
# -3,0,3 standard deviations on the first mode
# illustrate the extremes around the average shape
#------------------------------------------------------------------
params = vtk.vtkFloatArray()
params.SetNumberOfComponents(1)
params.SetNumberOfTuples(1)
params.SetTuple1(0,0.0)
shapea = vtk.vtkPolyData()
shapea.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params,shapea)
normalsa = vtk.vtkPolyDataNormals()
normalsa.SetInputData(shapea)
map3a = vtk.vtkPolyDataMapper()
map3a.SetInputConnection(normalsa.GetOutputPort())
Actor3a = vtk.vtkActor()
Actor3a.SetMapper(map3a)
Actor3a.GetProperty().SetDiffuseColor(1,1,1)
params.SetTuple1(0,-3.0)
shapeb = vtk.vtkPolyData()
shapeb.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params,shapeb)
normalsb = vtk.vtkPolyDataNormals()
normalsb.SetInputData(shapeb)
map3b = vtk.vtkPolyDataMapper()
map3b.SetInputConnection(normalsb.GetOutputPort())
Actor3b = vtk.vtkActor()
Actor3b.SetMapper(map3b)
Actor3b.GetProperty().SetDiffuseColor(1,1,1)
params.SetTuple1(0,3.0)
shapec = vtk.vtkPolyData()
shapec.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params,shapec)
normalsc = vtk.vtkPolyDataNormals()
normalsc.SetInputData(shapec)
map3c = vtk.vtkPolyDataMapper()
map3c.SetInputConnection(normalsc.GetOutputPort())
Actor3c = vtk.vtkActor()
Actor3c.SetMapper(map3c)
Actor3c.GetProperty().SetDiffuseColor(1,1,1)
#------------------------------------------------------------------
# map the second mode into the fourth renderer:
#------------------------------------------------------------------
params4 = vtk.vtkFloatArray()
params4.SetNumberOfComponents(1)
params4.SetNumberOfTuples(2)
params4.SetTuple1(0,0.0)
params4.SetTuple1(1,-3.0)
shape4a = vtk.vtkPolyData()
shape4a.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params4,shape4a)
normals4a = vtk.vtkPolyDataNormals()
normals4a.SetInputData(shape4a)
map4a = vtk.vtkPolyDataMapper()
map4a.SetInputConnection(normals4a.GetOutputPort())
Actor4a = vtk.vtkActor()
Actor4a.SetMapper(map4a)
Actor4a.GetProperty().SetDiffuseColor(1,1,1)
params4.SetTuple1(1,0.0)
shape4b = vtk.vtkPolyData()
shape4b.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params4,shape4b)
normals4b = vtk.vtkPolyDataNormals()
normals4b.SetInputData(shape4b)
map4b = vtk.vtkPolyDataMapper()
map4b.SetInputConnection(normals4b.GetOutputPort())
Actor4b = vtk.vtkActor()
Actor4b.SetMapper(map4b)
Actor4b.GetProperty().SetDiffuseColor(1,1,1)
params4.SetTuple1(1,3.0)
shape4c = vtk.vtkPolyData()
shape4c.DeepCopy(sphere.GetOutput())
pca.GetParameterisedShape(params4,shape4c)
normals4c = vtk.vtkPolyDataNormals()
normals4c.SetInputData(shape4c)
map4c = vtk.vtkPolyDataMapper()
map4c.SetInputConnection(normals4c.GetOutputPort())
Actor4c = vtk.vtkActor()
Actor4c.SetMapper(map4c)
Actor4c.GetProperty().SetDiffuseColor(1,1,1)
#------------------------------------------------------------------
# Create the RenderWindow and its four Renderers
#------------------------------------------------------------------
ren1 = vtk.vtkRenderer()
ren2 = vtk.vtkRenderer()
ren3 = vtk.vtkRenderer()
ren4 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
renWin.AddRenderer(ren3)
renWin.AddRenderer(ren4)
renWin.SetSize(600,200)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer
ren1.AddActor(Actor1a)
ren1.AddActor(Actor1b)
ren1.AddActor(Actor1c)
ren2.AddActor(Actor2a)
ren2.AddActor(Actor2b)
ren2.AddActor(Actor2c)
ren3.AddActor(Actor3a)
ren3.AddActor(Actor3b)
ren3.AddActor(Actor3c)
ren4.AddActor(Actor4a)
ren4.AddActor(Actor4b)
ren4.AddActor(Actor4c)
# set the properties of the renderers
ren1.SetBackground(1,1,1)
ren1.SetViewport(0.0,0.0,0.25,1.0)
ren1.ResetCamera()
ren1.GetActiveCamera().SetPosition(1,-1,0)
ren1.ResetCamera()
ren2.SetBackground(1,1,1)
ren2.SetViewport(0.25,0.0,0.5,1.0)
ren2.ResetCamera()
ren2.GetActiveCamera().SetPosition(1,-1,0)
ren2.ResetCamera()
ren3.SetBackground(1,1,1)
ren3.SetViewport(0.5,0.0,0.75,1.0)
ren3.ResetCamera()
ren3.GetActiveCamera().SetPosition(1,-1,0)
ren3.ResetCamera()
ren4.SetBackground(1,1,1)
ren4.SetViewport(0.75,0.0,1.0,1.0)
ren4.ResetCamera()
ren4.GetActiveCamera().SetPosition(1,-1,0)
ren4.ResetCamera()
# render the image
#
renWin.Render()
# output the image to file (used to generate the initial regression image)
#vtkWindowToImageFilter to_image
#to_image SetInput renWin
#vtkPNGWriter to_png
#to_png SetFileName "TestPCA.png"
#to_png SetInputConnection [to_image GetOutputPort]
#to_png Write
# prevent the tk window from showing up then start the event loop
# --- end of script --
| bsd-3-clause |
d40223223/608 | static/Brython3.1.1-20150328-091302/Lib/formatter.py | 751 | 14930 | """Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
AS_IS = None
class NullFormatter:
"""A formatter which does nothing.
If the writer parameter is omitted, a NullWriter instance is created.
No methods of the writer are called by NullFormatter instances.
Implementations should inherit from this class if implementing a writer
interface but don't need to inherit any implementation.
"""
def __init__(self, writer=None):
if writer is None:
writer = NullWriter()
self.writer = writer
def end_paragraph(self, blankline): pass
def add_line_break(self): pass
def add_hor_rule(self, *args, **kw): pass
def add_label_data(self, format, counter, blankline=None): pass
def add_flowing_data(self, data): pass
def add_literal_data(self, data): pass
def flush_softspace(self): pass
def push_alignment(self, align): pass
def pop_alignment(self): pass
def push_font(self, x): pass
def pop_font(self): pass
def push_margin(self, margin): pass
def pop_margin(self): pass
def set_spacing(self, spacing): pass
def push_style(self, *styles): pass
def pop_style(self, n=1): pass
def assert_line_data(self, flag=1): pass
class AbstractFormatter:
"""The standard formatter.
This implementation has demonstrated wide applicability to many writers,
and may be used directly in most circumstances. It has been used to
implement a full-featured World Wide Web browser.
"""
# Space handling policy: blank spaces at the boundary between elements
# are handled by the outermost context. "Literal" data is not checked
# to determine context, so spaces in literal data are handled directly
# in all circumstances.
def __init__(self, writer):
self.writer = writer # Output device
self.align = None # Current alignment
self.align_stack = [] # Alignment stack
self.font_stack = [] # Font state
self.margin_stack = [] # Margin state
self.spacing = None # Vertical spacing state
self.style_stack = [] # Other state, e.g. color
self.nospace = 1 # Should leading space be suppressed
self.softspace = 0 # Should a space be inserted
self.para_end = 1 # Just ended a paragraph
self.parskip = 0 # Skipped space between paragraphs?
self.hard_break = 1 # Have a hard break
self.have_label = 0
def end_paragraph(self, blankline):
if not self.hard_break:
self.writer.send_line_break()
self.have_label = 0
if self.parskip < blankline and not self.have_label:
self.writer.send_paragraph(blankline - self.parskip)
self.parskip = blankline
self.have_label = 0
self.hard_break = self.nospace = self.para_end = 1
self.softspace = 0
def add_line_break(self):
if not (self.hard_break or self.para_end):
self.writer.send_line_break()
self.have_label = self.parskip = 0
self.hard_break = self.nospace = 1
self.softspace = 0
def add_hor_rule(self, *args, **kw):
if not self.hard_break:
self.writer.send_line_break()
self.writer.send_hor_rule(*args, **kw)
self.hard_break = self.nospace = 1
self.have_label = self.para_end = self.softspace = self.parskip = 0
def add_label_data(self, format, counter, blankline = None):
if self.have_label or not self.hard_break:
self.writer.send_line_break()
if not self.para_end:
self.writer.send_paragraph((blankline and 1) or 0)
if isinstance(format, str):
self.writer.send_label_data(self.format_counter(format, counter))
else:
self.writer.send_label_data(format)
self.nospace = self.have_label = self.hard_break = self.para_end = 1
self.softspace = self.parskip = 0
def format_counter(self, format, counter):
label = ''
for c in format:
if c == '1':
label = label + ('%d' % counter)
elif c in 'aA':
if counter > 0:
label = label + self.format_letter(c, counter)
elif c in 'iI':
if counter > 0:
label = label + self.format_roman(c, counter)
else:
label = label + c
return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
# This makes a strong assumption that lowercase letters
# and uppercase letters form two contiguous blocks, with
# letters in order!
s = chr(ord(case) + x)
label = s + label
return label
def format_roman(self, case, counter):
ones = ['i', 'x', 'c', 'm']
fives = ['v', 'l', 'd']
label, index = '', 0
# This will die of IndexError when counter is too big
while counter > 0:
counter, x = divmod(counter, 10)
if x == 9:
label = ones[index] + ones[index+1] + label
elif x == 4:
label = ones[index] + fives[index] + label
else:
if x >= 5:
s = fives[index]
x = x-5
else:
s = ''
s = s + ones[index]*x
label = s + label
index = index + 1
if case == 'I':
return label.upper()
return label
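    # Illustrative outputs of the counter formatters above (assuming the usual
    # letter and Roman-numeral conventions):
    #   format_counter('1.', 3)  -> '3.'
    #   format_letter('a', 27)   -> 'aa'
    #   format_roman('I', 9)     -> 'IX'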
def add_flowing_data(self, data):
if not data: return
prespace = data[:1].isspace()
postspace = data[-1:].isspace()
data = " ".join(data.split())
if self.nospace and not data:
return
elif prespace or self.softspace:
if not data:
if not self.nospace:
self.softspace = 1
self.parskip = 0
return
if not self.nospace:
data = ' ' + data
self.hard_break = self.nospace = self.para_end = \
self.parskip = self.have_label = 0
self.softspace = postspace
self.writer.send_flowing_data(data)
def add_literal_data(self, data):
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
def flush_softspace(self):
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
def push_alignment(self, align):
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
def pop_alignment(self):
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
def push_font(self, font):
size, i, b, tt = font
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.font_stack:
csize, ci, cb, ctt = self.font_stack[-1]
if size is AS_IS: size = csize
if i is AS_IS: i = ci
if b is AS_IS: b = cb
if tt is AS_IS: tt = ctt
font = (size, i, b, tt)
self.font_stack.append(font)
self.writer.new_font(font)
def pop_font(self):
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
def push_margin(self, margin):
self.margin_stack.append(margin)
fstack = [m for m in self.margin_stack if m]
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
def pop_margin(self):
if self.margin_stack:
del self.margin_stack[-1]
fstack = [m for m in self.margin_stack if m]
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
def set_spacing(self, spacing):
self.spacing = spacing
self.writer.new_spacing(spacing)
def push_style(self, *styles):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
def pop_style(self, n=1):
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
def assert_line_data(self, flag=1):
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
class NullWriter:
"""Minimal writer interface to use in testing & inheritance.
A writer which only provides the interface definition; no actions are
taken on any methods. This should be the base class for all writers
which do not need to inherit any implementation methods.
"""
def __init__(self): pass
def flush(self): pass
def new_alignment(self, align): pass
def new_font(self, font): pass
def new_margin(self, margin, level): pass
def new_spacing(self, spacing): pass
def new_styles(self, styles): pass
def send_paragraph(self, blankline): pass
def send_line_break(self): pass
def send_hor_rule(self, *args, **kw): pass
def send_label_data(self, data): pass
def send_flowing_data(self, data): pass
def send_literal_data(self, data): pass
class AbstractWriter(NullWriter):
"""A writer which can be used in debugging formatters, but not much else.
Each method simply announces itself by printing its name and
arguments on standard output.
"""
def new_alignment(self, align):
print("new_alignment(%r)" % (align,))
def new_font(self, font):
print("new_font(%r)" % (font,))
def new_margin(self, margin, level):
print("new_margin(%r, %d)" % (margin, level))
def new_spacing(self, spacing):
print("new_spacing(%r)" % (spacing,))
def new_styles(self, styles):
print("new_styles(%r)" % (styles,))
def send_paragraph(self, blankline):
print("send_paragraph(%r)" % (blankline,))
def send_line_break(self):
print("send_line_break()")
def send_hor_rule(self, *args, **kw):
print("send_hor_rule()")
def send_label_data(self, data):
print("send_label_data(%r)" % (data,))
def send_flowing_data(self, data):
print("send_flowing_data(%r)" % (data,))
def send_literal_data(self, data):
print("send_literal_data(%r)" % (data,))
class DumbWriter(NullWriter):
"""Simple writer class which writes output on the file object passed in
as the file parameter or, if file is omitted, on standard output. The
output is simply word-wrapped to the number of columns specified by
the maxcol parameter. This class is suitable for reflowing a sequence
of paragraphs.
"""
def __init__(self, file=None, maxcol=72):
self.file = file or sys.stdout
self.maxcol = maxcol
NullWriter.__init__(self)
self.reset()
def reset(self):
self.col = 0
self.atbreak = 0
def send_paragraph(self, blankline):
self.file.write('\n'*blankline)
self.col = 0
self.atbreak = 0
def send_line_break(self):
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_hor_rule(self, *args, **kw):
self.file.write('\n')
self.file.write('-'*self.maxcol)
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_literal_data(self, data):
self.file.write(data)
i = data.rfind('\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = data.expandtabs()
self.col = self.col + len(data)
self.atbreak = 0
def send_flowing_data(self, data):
if not data: return
atbreak = self.atbreak or data[0].isspace()
col = self.col
maxcol = self.maxcol
write = self.file.write
for word in data.split():
if atbreak:
if col + len(word) >= maxcol:
write('\n')
col = 0
else:
write(' ')
col = col + 1
write(word)
col = col + len(word)
atbreak = 1
self.col = col
self.atbreak = data[-1].isspace()
def test(file = None):
w = DumbWriter()
f = AbstractFormatter(w)
if file is not None:
fp = open(file)
elif sys.argv[1:]:
fp = open(sys.argv[1])
else:
fp = sys.stdin
for line in fp:
if line == '\n':
f.end_paragraph(1)
else:
f.add_flowing_data(line)
f.end_paragraph(0)
if __name__ == '__main__':
test()
| gpl-3.0 |
Aimage/shinken | test/test_freshness.py | 18 | 3293 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestFreshness(ShinkenTest):
def setUp(self):
self.setup_with_file('etc/shinken_freshness.cfg')
    # Check if the check_freshness is doing its job
def test_check_freshness(self):
self.print_header()
        # We want an eventhandler (the perfdata command) to be put in the actions dict
# after we got a service check
now = time.time()
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc.active_checks_enabled = False
self.assertEqual(True, svc.check_freshness)
#--------------------------------------------------------------
# initialize host/service state
#--------------------------------------------------------------
# We do not want to be just a string but a real command
print "Additonal freshness latency", svc.__class__.additional_freshness_latency
self.scheduler_loop(1, [[svc, 0, 'OK | bibi=99%']])
print "Addi:", svc.last_state_update, svc.freshness_threshold, svc.check_freshness
        # By default check_freshness is set to False, so no new checks
self.assertEqual(0, len(svc.actions))
svc.do_check_freshness()
self.assertEqual(0, len(svc.actions))
# We make it 10s less than it was
svc.last_state_update = svc.last_state_update - 10
#svc.check_freshness = True
        # Now we activate it, with a value that is too small (now - 10s is still higher than now - (1 + 15), the threshold plus the additional latency)
# So still no check
svc.freshness_threshold = 1
print "Addi:", svc.last_state_update, svc.freshness_threshold, svc.check_freshness
svc.do_check_freshness()
self.assertEqual(0, len(svc.actions))
        # Now activate the freshness check globally
cmd = "[%lu] ENABLE_SERVICE_FRESHNESS_CHECKS" % now
self.sched.run_external_command(cmd)
        # Ok, now we remove another 10s. Here we will see the new entry
svc.last_state_update = svc.last_state_update - 10
svc.do_check_freshness()
self.assertEqual(1, len(svc.actions))
# And we check for the message in the log too
self.assert_any_log_match('The results of service.*')
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
TheImagingSource/tiscamera | doc/sphinx-tabs/sphinx_tabs/test/testcase.py | 1 | 5094 | # pylint: disable=import-error,no-name-in-module
from distutils.version import StrictVersion
import unittest
import re
import os
import pkg_resources
from sphinx import __version__ as __sphinx_version__
from sphinx.builders.html import StandaloneHTMLBuilder
from lxml import etree
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def _parse(xml):
xml = xml.replace('©', 'copy_sym')
parser = etree.XMLParser(ns_clean=True, remove_blank_text=True)
return etree.parse(StringIO(xml), parser)
def _strip_xmlns(xml):
return xml.replace(' xmlns="http://www.w3.org/1999/xhtml"', '')
def get_scripts(xml):
tree = _parse(xml)
scripts = tree.findall('.//{*}script')
scripts = [x.get('src') for x in scripts]
return [x.replace('_static/', '') for x in scripts if x is not None]
def get_stylesheets(xml):
tree = _parse(xml)
stylesheets = tree.findall('.//{*}link[@rel="stylesheet"]')
return [x.get('href').replace('_static/', '') for x in stylesheets]
def get_body(xml):
tree = _parse(xml)
body = tree.find('.//{*}div[@class="bodywrapper"]')[0][0]
return _strip_xmlns(etree.tostring(body).decode('utf-8'))
def normalize_xml(xml):
content = re.sub(r'>\s+<', '><', xml)
content = etree.tostring(
_parse(content), pretty_print=True).decode('utf-8')
return content
class TestCase(unittest.TestCase):
def tearDown(self): # pylint: disable=invalid-name
# Reset script and css files after test
if StrictVersion(__sphinx_version__) < StrictVersion('1.8.0'):
StandaloneHTMLBuilder.script_files = \
StandaloneHTMLBuilder.script_files[:3]
if StrictVersion(__sphinx_version__) > StrictVersion('1.6.0'):
# pylint: disable=no-name-in-module
from sphinx.builders.html import CSSContainer
StandaloneHTMLBuilder.css_files = CSSContainer()
# pylint: enable=no-name-in-module
elif StrictVersion(__sphinx_version__) < StrictVersion('1.8.0'):
StandaloneHTMLBuilder.css_files = []
@staticmethod
def get_result(app, filename):
path = os.path.join(app.outdir, filename+'.html')
with open(path, 'r') as handle:
return handle.read()
@staticmethod
def get_expectation(dirname, filename):
provider = pkg_resources.get_provider(__name__)
resource = '%s/%s.html' % (dirname, filename)
if provider.has_resource(resource):
return pkg_resources.resource_string(
__name__, resource).decode('utf-8')
result = []
for i in range(10):
resource_i = '%s.%d' % (resource, i)
if provider.has_resource(resource_i):
result.append(pkg_resources.resource_string(
__name__, resource_i).decode('utf-8'))
return result
def assertXMLEqual( # pylint: disable=invalid-name
self, expected, actual):
if isinstance(expected, list):
actual = normalize_xml(get_body(actual))
for expected_candidate in expected:
expected_candidate = normalize_xml(expected_candidate)
if expected_candidate == actual:
return
self.fail('XML does not match')
else:
expected = normalize_xml(expected)
actual = normalize_xml(get_body(actual))
self.assertEqual(expected, actual)
def assertHasTabsAssets( # pylint: disable=invalid-name
self, xml):
stylesheets = get_stylesheets(xml)
scripts = get_scripts(xml)
def filter_scripts(x):
return x != 'documentation_options.js' and 'mathjax' not in x
scripts = [x for x in scripts if filter_scripts(x)]
self.assertEqual(stylesheets, [
'alabaster.css',
'pygments.css',
'sphinx_tabs/tabs.css',
'sphinx_tabs/semantic-ui-2.2.10/segment.min.css',
'sphinx_tabs/semantic-ui-2.2.10/menu.min.css',
'sphinx_tabs/semantic-ui-2.2.10/tab.min.css',
'custom.css'
])
self.assertEqual(scripts, [
'jquery.js',
'underscore.js',
'doctools.js',
'sphinx_tabs/tabs.js',
'sphinx_tabs/semantic-ui-2.2.10/tab.min.js'
])
def assertDoesNotHaveTabsAssets( # pylint: disable=invalid-name
self, xml):
stylesheets = get_stylesheets(xml)
scripts = get_scripts(xml)
for stylesheet in stylesheets:
self.assertTrue('sphinx_tabs' not in stylesheet)
for script in scripts:
self.assertTrue('sphinx_tabs' not in script)
def assertStylesheetsEqual( # pylint: disable=invalid-name
self, expected, xml):
actual = get_stylesheets(xml)
self.assertEqual(expected, actual)
def assertScriptsEqual( # pylint: disable=invalid-name
self, expected, xml):
actual = get_scripts(xml)
self.assertEqual(expected, actual)
| apache-2.0 |
talumbau/blaze | samples/ooc-groupby.py | 4 | 8446 | """
This script performs an out of core groupby operation for different datasets.
The datasets to be processed are normally in CSV files and the key and
value to be used for the grouping are defined programmatically via small
functions (see toy_stream() and statsmodel_stream() for examples).
Those datasets included in statsmodel will require this package
installed (it is available in Anaconda, so it should be an easy
dependency to solve).
Usage: $ `script` dataset_class dataset_filename
`dataset_class` can be either 'toy', 'randhie' or 'contributions'.
'toy' is a self-contained dataset and is meant for debugging mainly.
The 'randhie' option implements support for the dataset with the same name
included in the statsmodel package.
Finally 'contributions' is meant to compute aggregations on the
contributions to the different US campaigns. This latter requires a
second argument (dataset_filename), which is a CSV file downloaded from:
http://data.influenceexplorer.com/bulk/
"""
import sys
from itertools import islice
import io
import csv
import numpy as np
from dynd import nd, ndt
import blz
# Number of lines to read per each iteration
LPC = 1000
# Max number of chars to map for a bytes or string in NumPy
MAXCHARS = 64
def get_nptype(dtype, val):
"""Convert the `val` field in dtype into a numpy dtype."""
dytype = dtype[nd.as_py(dtype.field_names).index(val)]
# strings and bytes cannot be natively represented in numpy
if dytype == ndt.string:
nptype = np.dtype("U%d" % MAXCHARS)
elif dytype == ndt.bytes:
nptype = np.dtype("S%d" % MAXCHARS)
else:
# There should be no problems with the rest
nptype = dytype.as_numpy()
return nptype
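# Illustrative: with the toy dtype defined below, get_nptype(dt, 'val2')
# resolves to a numpy int32, while string/bytes fields map to fixed-width
# 'U64'/'S64' dtypes as per MAXCHARS.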
def groupby(sreader, key, val, dtype, path=None, lines_per_chunk=LPC):
"""Group the `val` field in `sreader` stream of lines by `key` index.
Parameters
----------
sreader : iterator
Iterator over a stream of CSV lines.
key : string
The name of the field to be grouped by.
val : string
The field name with the values that have to be grouped.
dtype : dynd dtype
The DyND data type with all the fields of the CSV lines,
including the `key` and `val` names.
path : string
The path of the file where the BLZ array with the final
grouping will be stored. If None (default), the BLZ will be
stored in-memory (and hence non-persistent).
lines_per_chunk : int
        The number of lines that are read and grouped in-memory per
        iteration.  For optimal performance, some experimentation may be
        needed.  The default value should work reasonably
well, though.
Returns
-------
output : BLZ table
Returns a BLZ table with column names that are the groups
resulting from the groupby operation. The columns are filled
with the `val` field of the lines delivered by `sreader`.
"""
try:
nptype = get_nptype(dtype, val)
except ValueError:
raise ValueError("`val` should be a valid field")
# Start reading chunks
prev_keys = set()
while True:
ndbuf = nd.array(islice(sreader, lines_per_chunk), dtype)
if len(ndbuf) == 0: break # CSV data exhausted
# Do the groupby for this chunk
keys = getattr(ndbuf, key)
if val is None:
vals = ndbuf
else:
vals = getattr(ndbuf, val)
sby = nd.groupby(vals, keys)
lkeys = nd.as_py(sby.groups)
skeys = set(lkeys)
# BLZ does not understand dynd objects (yet)
sby = nd.as_py(sby.eval())
if len(prev_keys) == 0:
# Add the initial keys to a BLZ table
columns = [np.array(sby[i], nptype) for i in range(len(lkeys))]
ssby = blz.btable(columns=columns, names=lkeys, rootdir=path,
mode='w')
else:
# Have we new keys?
new_keys = skeys.difference(prev_keys)
for new_key in new_keys:
# Get the index of the new key
idx = lkeys.index(new_key)
# and add the values as a new columns
ssby.addcol(sby[idx], new_key, dtype=nptype)
# Now fill the pre-existing keys
existing_keys = skeys.intersection(prev_keys)
for existing_key in existing_keys:
# Get the index of the existing key
idx = lkeys.index(existing_key)
# and append the values here
ssby[existing_key].append(sby[idx])
# Add the new keys to the existing ones
prev_keys |= skeys
# Before returning, flush all data into disk
if path is not None:
ssby.flush()
return ssby
# A CSV toy example
csvbuf = u"""k1,v1,1,u1
k2,v2,2,u2
k3,v3,3,u3
k4,v4,4,u4
k5,v5,5,u5
k5,v6,6,u6
k4,v7,7,u7
k4,v8,8,u8
k4,v9,9,u9
k1,v10,10,u9
k5,v11,11,u11
"""
def toy_stream():
sreader = csv.reader(io.StringIO(csvbuf))
# The dynd dtype for the CSV file above
dt = ndt.type('{key: string, val1: string, val2: int32, val3: bytes}')
# The name of the persisted table where the groupby will be stored
return sreader, dt
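# With the toy CSV above, grouping 'val1' by 'key' produces one column per
# distinct key (k1..k5); e.g. the k4 column collects ['v4', 'v7', 'v8', 'v9']
# (illustrative).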
# This accesses different datasets in the statsmodels package
def statsmodel_stream(stream):
import statsmodels.api as sm
data = getattr(sm.datasets, stream)
f = open(data.PATH, 'rb')
if stream == 'randhie':
# For a description of this dataset, see:
# http://statsmodels.sourceforge.net/devel/datasets/generated/randhie.html
f.readline() # read out the headers line
dtypes = ('{mdvis: string, lncoins: float32, idp: int32,'
' lpi:float32, fmde: float32, physlm: float32,'
' disea: float32, hlthg: int32, hlthf: int32,'
' hlthp: int32}')
else:
raise NotImplementedError(
"Importing this dataset has not been implemented yet")
sreader = csv.reader(f)
dtype = ndt.type(dtypes)
return sreader, dtype
# For contributions to state and federal US campaings.
# CSV files can be downloaded from:
# http://data.influenceexplorer.com/bulk/
def contributions_stream(stream_file):
f = open(stream_file, 'rb')
# Description of this dataset
headers = f.readline().strip() # read out the headers line
headers = headers.split(',')
# The types for the different fields
htypes = [ ndt.int32, ndt.int16, ndt.int16] + \
[ ndt.string ] * 4 + \
[ ndt.bool, ndt.float64 ] + \
[ ndt.string ] * 33
# Build the DyND data type
dtype = ndt.make_struct(htypes, headers)
sreader = csv.reader(f)
return sreader, dtype
if __name__ == "__main__":
if len(sys.argv) == 1:
print("Specify a dataset from: [toy, randhie, contributions]")
sys.exit()
# Which dataset do we want to group?
which = sys.argv[1]
if which == "toy":
# Get the CSV iterator and dtype of fields
sreader, dt = toy_stream()
        # Do the actual groupby
ssby = groupby(sreader, 'key', 'val1', dtype=dt, path=None,
lines_per_chunk=2)
elif which == "randhie":
# Get the CSV iterator and dtype of fields
sreader, dt = statsmodel_stream(which)
        # Do the actual groupby
ssby = groupby(sreader, 'mdvis', 'lncoins', dtype=dt, path=None)
elif which == "contributions":
# Get the CSV iterator and dtype of fields
if len(sys.argv) < 3:
print("Please specify a contributions file downloaded from: "
"http://data.influenceexplorer.com/bulk/")
sys.exit()
stream_file = sys.argv[2]
sreader, dt = contributions_stream(stream_file)
        # Do the actual groupby
ssby = groupby(
sreader, 'recipient_party', 'amount', dtype=dt, path='contribs.blz')
else:
raise NotImplementedError(
"parsing for `%s` dataset not implemented" % which)
# Retrieve the data in the BLZ structure
#ssby = blz.from_blz(path) # open from disk, if ssby is persistent
for key in ssby.names:
values = ssby[key]
if which in ('toy', 'randhie'):
print "key:", key, values
elif which == 'contributions':
print "Party: '%s'\tAmount: %13.2f\t#contribs: %8d" % \
(key, values.sum(), len(values))
| bsd-3-clause |
vlukes/sfepy | tests/test_poly_spaces.py | 1 | 11051 | """
Test continuity of polynomial basis and its gradients along an edge on
:math:`y` line (2D) or on a face in :math:`x`-:math:`y` plane (3D) between two
elements aligned with the coordinate system, stack one on top of the other. The
evaluation occurs in several points shifted by a very small amount from the
boundary between the elements into the top and the bottom element.
For H1 space, the basis should be continuous. The components of its gradient
parallel to the edge/face should be continuous as well, while the perpendicular
component should have the same absolute value, but different sign in the top
and the bottom element.
All connectivity permutations of the two elements are tested.
WARNING: Lagrange basis on 3_8 elements fails the test for order >= 3 for many
connectivity permutations!
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.testing import TestCommon
from sfepy.base.base import assert_
rsels = {
'2_3' : 'vertices in (y > -0.1) & (y < 0.1)',
'2_4' : 'vertices in (y > 0.9) & (y < 1.1)',
'3_4' : 'vertices in (z > -0.1) & (z < 0.1)',
'3_8' : 'vertices in (z > 0.9) & (z < 1.1)',
}
eps = 1e-5
shifts = {
'2_3' : nm.array([[0.0, 0.0], [0.0, eps]], dtype=nm.float64),
'2_4' : nm.array([[0.0, 1.0], [0.0, eps]], dtype=nm.float64),
'3_4' : nm.array([[0.0, 0.0, 0.0], [0.0, 0.0, eps]], dtype=nm.float64),
'3_8' : nm.array([[0.0, 0.0, 1.0], [0.0, 0.0, eps]], dtype=nm.float64),
}
def _gen_common_data(orders, gels, report):
import sfepy
from sfepy.base.base import Struct
from sfepy.linalg import combine
from sfepy.discrete import FieldVariable, Integral
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.discrete.common.global_interp import get_ref_coors
bases = ([ii for ii in combine([['2_4', '3_8'],
['lagrange', 'lobatto']])]
+ [ii for ii in combine([['2_3', '3_4'],
['lagrange']])])
for geom, poly_space_base in bases:
report('geometry: %s, base: %s' % (geom, poly_space_base))
order = orders[geom]
integral = Integral('i', order=order)
aux = '' if geom in ['2_4', '3_8'] else 'z'
mesh0 = Mesh.from_file('meshes/elements/%s_2%s.mesh' % (geom, aux),
prefix_dir=sfepy.data_dir)
gel = gels[geom]
perms = gel.get_conn_permutations()
qps, qp_weights = integral.get_qp(gel.surface_facet.name)
zz = nm.zeros_like(qps[:, :1])
qps = nm.hstack(([qps] + [zz]))
shift = shifts[geom]
rcoors = nm.ascontiguousarray(qps
+ shift[:1, :] - shift[1:, :])
ccoors = nm.ascontiguousarray(qps
+ shift[:1, :] + shift[1:, :])
for ir, pr in enumerate(perms):
for ic, pc in enumerate(perms):
report('ir: %d, ic: %d' % (ir, ic))
report('pr: %s, pc: %s' % (pr, pc))
mesh = mesh0.copy()
conn = mesh.cmesh.get_conn(mesh0.cmesh.tdim, 0).indices
conn = conn.reshape((mesh0.n_el, -1))
conn[0, :] = conn[0, pr]
conn[1, :] = conn[1, pc]
conn2 = mesh.get_conn(gel.name)
assert_((conn == conn2).all())
cache = Struct(mesh=mesh)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
region = domain.create_region('Facet', rsels[geom], 'facet')
field = Field.from_args('f', nm.float64, shape=1,
region=omega, approx_order=order,
poly_space_base=poly_space_base)
var = FieldVariable('u', 'unknown', field)
report('# dofs: %d' % var.n_dof)
vec = nm.empty(var.n_dof, dtype=var.dtype)
ps = field.poly_space
dofs = field.get_dofs_in_region(region, merge=False)
edofs, fdofs = nm.unique(dofs[1]), nm.unique(dofs[2])
rrc, rcells, rstatus = get_ref_coors(field, rcoors,
cache=cache)
crc, ccells, cstatus = get_ref_coors(field, ccoors,
cache=cache)
assert_((rstatus == 0).all() and (cstatus == 0).all())
yield (geom, poly_space_base, qp_weights, mesh, ir, ic,
field, ps, rrc, rcells[0], crc, ccells[0],
vec, edofs, fdofs)
class Test(TestCommon):
@staticmethod
def from_conf(conf, options):
from sfepy.discrete.fem.geometry_element import GeometryElement
gels = {}
for key in ['2_3', '2_4', '3_4', '3_8']:
gel = GeometryElement(key)
gel.create_surface_facet()
gels[key] = gel
return Test(conf=conf, options=options, gels=gels)
def test_continuity(self):
ok = True
orders = {'2_3' : 3, '2_4' : 3, '3_4' : 4, '3_8' : 3}
bads = []
bad_families = set()
for (geom, poly_space_base, qp_weights, mesh, ir, ic,
field, ps, rrc, rcell, crc, ccell, vec,
edofs, fdofs) in _gen_common_data(orders, self.gels, self.report):
if poly_space_base == 'lagrange':
rbf = ps.eval_base(rrc)
cbf = ps.eval_base(crc)
else:
rbf = ps.eval_base(rrc, ori=field.ori[:1])
cbf = ps.eval_base(crc, ori=field.ori[1:])
dofs = nm.r_[edofs, fdofs]
res = nm.zeros((2, dofs.shape[0]), dtype=nm.int32)
res[0, :] = dofs
for ii, ip in enumerate(dofs):
vec.fill(0.0)
vec[ip] = 1.0
evec = vec[field.econn]
rvals = nm.dot(rbf, evec[rcell])
cvals = nm.dot(cbf, evec[ccell])
_ok = nm.allclose(rvals, cvals, atol=1e-14, rtol=0.0)
res[1, ii] = _ok
if not _ok:
bads.append([geom, poly_space_base, ir, ic, ip])
bad_families.add((geom, poly_space_base))
ok = ok and _ok
self.report('results (dofs, status: 1 ok, 0 failure):\n%s' % res)
if not ok:
self.report('continuity errors:\n', bads)
self.report('%d in total!' % len(bads))
self.report('continuity errors occurred in these spaces:\n',
bad_families)
return ok
def test_gradients(self):
from sfepy.discrete.fem.mappings import VolumeMapping
ok = True
orders = {'2_3' : 3, '2_4' : 3, '3_4' : 4, '3_8' : 3}
bads = []
bad_families = set()
for (geom, poly_space_base, qp_weights, mesh, ir, ic,
field, ps, rrc, rcell, crc, ccell, vec,
edofs, fdofs) in _gen_common_data(orders, self.gels, self.report):
gel = self.gels[geom]
conn = mesh.get_conn(gel.name)
geo_ps = field.gel.poly_space
rmapping = VolumeMapping(mesh.coors, conn[rcell:rcell+1],
poly_space=geo_ps)
rori = field.ori[:1] if field.ori is not None else None
rvg = rmapping.get_mapping(rrc, qp_weights,
poly_space=ps, ori=rori)
rbfg = rvg.bfg
cmapping = VolumeMapping(mesh.coors, conn[ccell:ccell+1],
poly_space=geo_ps)
cori = field.ori[1:] if field.ori is not None else None
cvg = cmapping.get_mapping(crc, qp_weights,
poly_space=ps, ori=cori)
cbfg = cvg.bfg
dofs = nm.r_[edofs, fdofs]
res = nm.zeros((2, dofs.shape[0]), dtype=nm.int32)
res[0, :] = dofs
for ii, ip in enumerate(dofs):
vec.fill(0.0)
vec[ip] = 1.0
evec = vec[field.econn]
rvals = nm.dot(rbfg, evec[rcell])[0]
cvals = nm.dot(cbfg, evec[ccell])[0]
okx = nm.allclose(rvals[:, 0], cvals[:, 0],
atol=1e-12, rtol=0.0)
if gel.dim == 2:
oky = nm.allclose(rvals[:, 1], -cvals[:, 1],
atol=1e-12, rtol=0.0)
_ok = okx and oky
else:
oky = nm.allclose(rvals[:, 1], cvals[:, 1],
atol=1e-12, rtol=0.0)
okz = nm.allclose(rvals[:, 2], -cvals[:, 2],
atol=1e-12, rtol=0.0)
_ok = okx and oky and okz
res[1, ii] = _ok
if not _ok:
bads.append([geom, poly_space_base, ir, ic, ip])
bad_families.add((geom, poly_space_base))
ok = ok and _ok
self.report('results (dofs, status: 1 ok, 0 failure):\n%s' % res)
if not ok:
self.report('gradient continuity errors:\n', bads)
self.report('%d in total!' % len(bads))
self.report('gradient continuity errors occurred in these'
' spaces:\n', bad_families)
return ok
def test_hessians(self):
"""
Test the second partial derivatives of basis functions using finite
differences.
"""
from sfepy.linalg import combine
from sfepy.discrete import Integral, PolySpace
ok = True
orders = {'2_3' : 3, '2_4' : 3, '3_4' : 4, '3_8' : 3}
bases = ([ii for ii in combine([['2_3', '2_4', '3_4', '3_8'],
['lagrange']])])
for geom, poly_space_base in bases:
self.report('geometry: %s, base: %s' % (geom, poly_space_base))
order = orders[geom]
integral = Integral('i', order=order)
coors, _ = integral.get_qp(geom)
ps = PolySpace.any_from_args('ps', self.gels[geom], order,
base=poly_space_base)
dim = coors.shape[1]
h1 = nm.zeros((coors.shape[0], dim, dim, ps.n_nod), nm.float64)
eps = 1e-8
for ir in range(dim):
cc = coors.copy()
cc[:, ir] -= eps
aux0 = ps.eval_base(cc, diff=1)
cc[:, ir] += 2 * eps
aux1 = ps.eval_base(cc, diff=1)
h1[:, :, ir, :] = 0.5 * (aux1 - aux0) / eps
h2 = ps.eval_base(coors, diff=2)
_ok = nm.allclose(h1, h2, rtol=0, atol=50*eps)
self.report('hessians: error: %.2e ok: %s'
% (nm.abs(h1 - h2).max(), _ok))
ok = ok and _ok
return ok
| bsd-3-clause |
jurassic-c/Pynterface | Pynterface/grid.py | 1 | 2463 | class grid:
w = 0
h = 0
grid = None
def __init__(self):
rows = grid_array()
rows.onAdd = self._add_width
cols = grid_array()
cols.onAdd = self._add_height
self.grid = rows
self.grid.append(cols)
def __getitem__(self, x):
return self.grid[x]
def __setitem__(self, x, value):
self.grid[x] = value
def __str__(self):
return str(self.grid)
def item(self, coords):
return self.grid[coords[0]][coords[1]]
def set(self, x, y, value):
if x > self.w -1:
self._expand(x+1, self.h)
if y > self.h -1:
self._expand(self.w, y+1)
self.grid[x][y] = value
def next_coords_from(self, start, direction="right"):
x, y = start
if direction == "right":
x+= 1
if x >= self.w:
x = 0
elif direction == "left":
x-= 1
if x < 0:
x = self.w-1
elif direction == "up":
y-= 1
if y < 0:
y = self.h-1
elif direction == "down":
y+= 1
if y >= self.h:
y = 0
return (x, y)
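	# Illustrative wrap-around behaviour of next_coords_from: on a 3x2 grid,
	# next_coords_from((2, 0), "right") -> (0, 0) and
	# next_coords_from((0, 0), "up") -> (0, 1).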
def append(self, value):
self.grid.append(grid_array(value))
def add_right(self, new_grid):
w = self.w
for y in range(new_grid.h):
for x in range(new_grid.w):
self.set(x+w, y, new_grid[x][y])
def add_bottom(self, new_grid):
h = self.h
for x in range(new_grid.w):
for y in range(len(new_grid[x])):
self.set(x, y+h, new_grid[x][y])
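	# _expand (below) grows the grid to at least w columns and h rows, padding
	# new cells with None; set() relies on it when indexing past the current
	# size.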
def _expand(self, w, h):
if w > self.w:
for i in range(w - self.w):
self.grid.append(grid_array())
for col in self.grid:
for i in range(h - len(col)):
col.append(None)
self.h = h
def get_col(self, x):
return self.grid[x]
def get_row(self, y):
new_row = grid_array()
for x in range(self.w):
new_row.append(self.grid[x][y])
return new_row
def _add_width(self, arr):
self.w+= 1
def _add_height(self, arr):
if len(arr) > self.h:
self.h = len(arr)
class grid_array:
items = None
onAdd = None
def __init__(self, value=None):
self.items = []
if isinstance(value, list):
self.items.extend(value)
elif value != None:
self.items.append(value)
def __getitem__(self, i):
return self.items[i]
def __setitem__(self, i, value):
self.items[i] = value
def __len__(self):
return len(self.items)
def append(self, value):
self.items.append(value)
if self.onAdd:
self.onAdd(self)
def unique(self):
seen = set()
seen_add = seen.add
return [ x for x in self.items if x not in seen and not seen_add(x)]
def __str__(self):
return "[%s]" % ', '.join([str(item) for item in self.items]) | bsd-3-clause |
steedos/odoo | addons/purchase/report/purchase_report.py | 137 | 7689 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# Please note that these reports are not multi-currency !!!
#
from openerp.osv import fields,osv
from openerp import tools
class purchase_report(osv.osv):
_name = "purchase.report"
_description = "Purchases Orders"
_auto = False
_columns = {
'date': fields.datetime('Order Date', readonly=True, help="Date on which this document has been created"), # TDE FIXME master: rename into date_order
'state': fields.selection([('draft', 'Request for Quotation'),
('confirmed', 'Waiting Supplier Ack'),
('approved', 'Approved'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')],'Order Status', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'picking_type_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
'location_id': fields.many2one('stock.location', 'Destination', readonly=True),
'partner_id':fields.many2one('res.partner', 'Supplier', readonly=True),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'date_approve':fields.date('Date Approved', readonly=True),
'expected_date':fields.date('Expected Date', readonly=True),
'validator' : fields.many2one('res.users', 'Validated By', readonly=True),
'product_uom' : fields.many2one('product.uom', 'Reference Unit of Measure', required=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'user_id':fields.many2one('res.users', 'Responsible', readonly=True),
'delay':fields.float('Days to Validate', digits=(16,2), readonly=True),
'delay_pass':fields.float('Days to Deliver', digits=(16,2), readonly=True),
'quantity': fields.integer('Unit Quantity', readonly=True), # TDE FIXME master: rename into unit_quantity
'price_total': fields.float('Total Price', readonly=True),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'negociation': fields.float('Purchase-Standard Price', readonly=True, group_operator="avg"),
'price_standard': fields.float('Products Value', readonly=True, group_operator="sum"),
'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'category_id': fields.many2one('product.category', 'Category', readonly=True)
}
_order = 'date desc, price_total desc'
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'purchase_report')
cr.execute("""
create or replace view purchase_report as (
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
select
min(l.id) as id,
s.date_order as date,
l.state,
s.date_approve,
s.minimum_planned_date as expected_date,
s.dest_address_id,
s.pricelist_id,
s.validator,
spt.warehouse_id as picking_type_id,
s.partner_id as partner_id,
s.create_uid as user_id,
s.company_id as company_id,
l.product_id,
t.categ_id as category_id,
t.uom_id as product_uom,
s.location_id as location_id,
sum(l.product_qty/u.factor*u2.factor) as quantity,
extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
count(*) as nbr,
sum(l.price_unit*cr.rate*l.product_qty)::decimal(16,2) as price_total,
avg(100.0 * (l.price_unit*cr.rate*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
(sum(l.product_qty*cr.rate*l.price_unit)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
from purchase_order_line l
join purchase_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join stock_picking_type spt on (spt.id=s.picking_type_id)
join currency_rate cr on (cr.currency_id = s.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
group by
s.company_id,
s.create_uid,
s.partner_id,
u.factor,
s.location_id,
l.price_unit,
s.date_approve,
l.date_planned,
l.product_uom,
s.minimum_planned_date,
s.pricelist_id,
s.validator,
s.dest_address_id,
l.product_id,
t.categ_id,
s.date_order,
l.state,
spt.warehouse_id,
u.uom_type,
u.category_id,
t.uom_id,
u.id,
u2.factor
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eonpatapon/rally | rally/common/objects/task.py | 2 | 19210 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
import uuid
from rally.common import db
from rally.common.i18n import _LE
from rally import consts
from rally import exceptions
from rally.task.processing import charts
OUTPUT_SCHEMA = {
"type": "object",
"properties": {
"additive": {
"type": "array",
"items": {
"type": "object",
"properties": {
"title": {"type": "string"},
"description": {"type": "string"},
"chart_plugin": {"type": "string"},
"data": {
"type": "array",
"items": {
"type": "array",
"items": [{"type": "string"},
{"type": "number"}],
"additionalItems": False}},
"label": {"type": "string"},
"axis_label": {"type": "string"}},
"required": ["title", "chart_plugin", "data"],
"additionalProperties": False
}
},
"complete": {
"type": "array",
"items": {
"type": "object",
"properties": {
"title": {"type": "string"},
"description": {"type": "string"},
"chart_plugin": {"type": "string"},
"data": {"anyOf": [
{"type": "array",
"items": {
"type": "array",
"items": [
{"type": "string"},
{"anyOf": [
{"type": "array",
"items": {"type": "array",
"items": [{"type": "number"},
{"type": "number"}]
}},
{"type": "number"}]}]}},
{"type": "object",
"properties": {
"cols": {"type": "array",
"items": {"type": "string"}},
"rows": {
"type": "array",
"items": {
"type": "array",
"items": {"anyOf": [{"type": "string"},
{"type": "number"}]}}
}
},
"required": ["cols", "rows"],
"additionalProperties": False}
]},
"label": {"type": "string"},
"axis_label": {"type": "string"}
},
"required": ["title", "chart_plugin", "data"],
"additionalProperties": False
}
}
},
"required": ["additive", "complete"],
"additionalProperties": False
}
TASK_RESULT_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"key": {
"type": "object",
"properties": {
"kw": {
"type": "object"
},
"name": {
"type": "string"
},
"pos": {
"type": "integer"
},
},
"required": ["kw", "name", "pos"]
},
"sla": {
"type": "array",
"items": {
"type": "object",
"properties": {
"criterion": {
"type": "string"
},
"detail": {
"type": "string"
},
"success": {
"type": "boolean"
}
}
}
},
"result": {
"type": "array",
"items": {
"type": "object",
"properties": {
"atomic_actions": {
"type": "object"
},
"duration": {
"type": "number"
},
"error": {
"type": "array"
},
"idle_duration": {
"type": "number"
},
# NOTE(amaretskiy): "scenario_output" is deprecated
# in favor of "output"
"scenario_output": {
"type": "object",
"properties": {
"data": {
"type": "object"
},
"errors": {
"type": "string"
},
},
"required": ["data", "errors"]
},
"output": OUTPUT_SCHEMA
},
"required": ["atomic_actions", "duration", "error",
"idle_duration"]
},
"minItems": 1
},
"load_duration": {
"type": "number",
},
"full_duration": {
"type": "number",
},
},
"required": ["key", "sla", "result", "load_duration",
"full_duration"],
"additionalProperties": False
}
TASK_EXTENDED_RESULT_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"key": {
"type": "object",
"properties": {
"kw": {
"type": "object"
},
"name": {
"type": "string"
},
"pos": {
"type": "integer"
},
},
"required": ["kw", "name", "pos"]
},
"sla": {
"type": "array",
"items": {
"type": "object",
"properties": {
"criterion": {
"type": "string"
},
"detail": {
"type": "string"
},
"success": {
"type": "boolean"
}
}
}
},
"iterations": {
"type": "array",
"items": {
"type": "object",
"properties": {
"timestamp": {
"type": "number"
},
"atomic_actions": {
"type": "object"
},
"duration": {
"type": "number"
},
"error": {
"type": "array"
},
"idle_duration": {
"type": "number"
},
"output": OUTPUT_SCHEMA
},
"required": ["atomic_actions", "duration", "error",
"idle_duration", "output"]
},
"minItems": 1
},
"created_at": {
"anyOf": [
{"type": "string", "format": "date-time"},
{"type": "null"}
]
},
"updated_at": {
"anyOf": [
{"type": "string", "format": "date-time"},
{"type": "null"}
]
},
"info": {
"type": "object",
"properties": {
"atomic": {"type": "object"},
"iterations_count": {"type": "integer"},
"iterations_failed": {"type": "integer"},
"min_duration": {"type": "number"},
"max_duration": {"type": "number"},
"tstamp_start": {"type": "number"},
"full_duration": {"type": "number"},
"load_duration": {"type": "number"}
}
}
},
"required": ["key", "sla", "iterations", "info"],
"additionalProperties": False
}
class Task(object):
"""Represents a task object."""
    # NOTE(andreykurilin): The following stages don't contain a check for
    # the current status of the task. We should add it in the future, since "abort"
# cmd should work everywhere.
# TODO(andreykurilin): allow abort for each state.
NOT_IMPLEMENTED_STAGES_FOR_ABORT = [consts.TaskStatus.VERIFYING,
consts.TaskStatus.INIT]
def __init__(self, task=None, temporary=False, **attributes):
"""Task object init
        :param task: dictionary-like object that represents a task
        :param temporary: when this param is True the task will be created
with a random UUID and no database record. Used for special
purposes, like task config validation.
"""
self.is_temporary = temporary
if self.is_temporary:
self.task = task or {"uuid": str(uuid.uuid4())}
self.task.update(attributes)
else:
self.task = task or db.task_create(attributes)
def __getitem__(self, key):
return self.task[key]
def to_dict(self):
db_task = self.task
deployment_name = db.deployment_get(
self.task["deployment_uuid"])["name"]
db_task["deployment_name"] = deployment_name
return db_task
@staticmethod
def get_detailed(task_id):
return db.api.task_get_detailed(task_id)
@staticmethod
def get(uuid):
return Task(db.task_get(uuid))
@staticmethod
def get_status(uuid):
return db.task_get_status(uuid)
@staticmethod
def list(status=None, deployment=None):
return [Task(db_task) for db_task in db.task_list(status, deployment)]
@staticmethod
def delete_by_uuid(uuid, status=None):
db.task_delete(uuid, status=status)
def _update(self, values):
if not self.is_temporary:
self.task = db.task_update(self.task["uuid"], values)
def update_status(self, status, allowed_statuses=None):
if allowed_statuses:
db.task_update_status(self.task["uuid"], status, allowed_statuses)
else:
self._update({"status": status})
def update_verification_log(self, log):
self._update({"verification_log": json.dumps(log)})
def set_failed(self, etype, msg, etraceback):
self._update({"status": consts.TaskStatus.FAILED,
"verification_log": json.dumps([etype,
msg,
etraceback
])})
def get_results(self):
return db.task_result_get_all_by_uuid(self.task["uuid"])
@classmethod
def extend_results(cls, results, serializable=False):
"""Modify and extend results with aggregated data.
This is a workaround method that tries to adapt task results
        to the schema of the planned DB refactoring, so this method is expected
to be simplified after DB refactoring since all the data should
be taken as-is directly from the database.
        Each scenario's results have extra `info' with aggregated data,
        and iterations data is represented by an iterator - this simplifies
        its future implementation as a generator and gives the ability to process
        an arbitrary number of iterations with low memory usage.
:param results: list of db.sqlalchemy.models.TaskResult
:param serializable: bool, whether to convert json non-serializable
types (like datetime) to serializable ones
:returns: list of dicts, each dict represents scenario results:
key - dict, scenario input data
sla - list, SLA results
iterations - if serializable, then iterator with
iterations data, otherwise a list
created_at - if serializable, then str datetime,
otherwise absent
updated_at - if serializable, then str datetime,
otherwise absent
info:
atomic - dict where key is one of atomic action names
and value is dict {min_duration: number,
max_duration: number}
iterations_count - int number of iterations
iterations_failed - int number of iterations with errors
min_duration - float minimum iteration duration
max_duration - float maximum iteration duration
tstamp_start - float timestamp of the first iteration
full_duration - float full scenario duration
load_duration - float load scenario duration
"""
extended = []
for scenario_result in results:
scenario = dict(scenario_result)
tstamp_start = 0
min_duration = 0
max_duration = 0
iterations_failed = 0
atomic = collections.OrderedDict()
for itr in scenario["data"]["raw"]:
for atomic_name, duration in itr["atomic_actions"].items():
duration = duration or 0
if atomic_name not in atomic:
atomic[atomic_name] = {"min_duration": duration,
"max_duration": duration}
elif duration < atomic[atomic_name]["min_duration"]:
atomic[atomic_name]["min_duration"] = duration
elif duration > atomic[atomic_name]["max_duration"]:
atomic[atomic_name]["max_duration"] = duration
if not tstamp_start or itr["timestamp"] < tstamp_start:
tstamp_start = itr["timestamp"]
if "output" not in itr:
itr["output"] = {"additive": [], "complete": []}
# NOTE(amaretskiy): Deprecated "scenario_output"
# is supported for backward compatibility
if ("scenario_output" in itr
and itr["scenario_output"]["data"]):
itr["output"]["additive"].append(
{"items": itr["scenario_output"]["data"].items(),
"title": "Scenario output",
"description": "",
"chart": "OutputStackedAreaChart"})
del itr["scenario_output"]
if itr["error"]:
iterations_failed += 1
else:
duration = itr["duration"] or 0
if not min_duration or duration < min_duration:
min_duration = duration
if not max_duration or duration > max_duration:
max_duration = duration
for k in "created_at", "updated_at":
if serializable:
# NOTE(amaretskiy): convert datetime to str,
# because json.dumps() does not like datetime
if scenario[k]:
scenario[k] = scenario[k].strftime("%Y-%d-%mT%H:%M:%S")
else:
del scenario[k]
durations_stat = charts.MainStatsTable(
{"iterations_count": len(scenario["data"]["raw"]),
"atomic": atomic})
for itr in scenario["data"]["raw"]:
durations_stat.add_iteration(itr)
scenario["info"] = {
"stat": durations_stat.render(),
"atomic": atomic,
"iterations_count": len(scenario["data"]["raw"]),
"iterations_failed": iterations_failed,
"min_duration": min_duration,
"max_duration": max_duration,
"tstamp_start": tstamp_start,
"full_duration": scenario["data"]["full_duration"],
"load_duration": scenario["data"]["load_duration"]}
if serializable:
scenario["iterations"] = scenario["data"]["raw"]
else:
scenario["iterations"] = iter(scenario["data"]["raw"])
scenario["sla"] = scenario["data"]["sla"]
del scenario["data"]
del scenario["task_uuid"]
del scenario["id"]
extended.append(scenario)
return extended
def append_results(self, key, value):
db.task_result_create(self.task["uuid"], key, value)
def delete(self, status=None):
db.task_delete(self.task["uuid"], status=status)
def abort(self, soft=False):
current_status = self.get_status(self.task["uuid"])
if current_status in self.NOT_IMPLEMENTED_STAGES_FOR_ABORT:
raise exceptions.RallyException(
_LE("Failed to abort task '%(uuid)s'. It doesn't implemented "
"for '%(stages)s' stages. Current task status is "
"'%(status)s'.") %
{"uuid": self.task["uuid"], "status": current_status,
"stages": ", ".join(self.NOT_IMPLEMENTED_STAGES_FOR_ABORT)})
elif current_status in [consts.TaskStatus.FINISHED,
consts.TaskStatus.FAILED,
consts.TaskStatus.ABORTED]:
raise exceptions.RallyException(
_LE("Failed to abort task '%s', since it already "
"finished.") % self.task.uuid)
new_status = (consts.TaskStatus.SOFT_ABORTING
if soft else consts.TaskStatus.ABORTING)
self.update_status(new_status, allowed_statuses=(
consts.TaskStatus.RUNNING, consts.TaskStatus.SOFT_ABORTING))
| apache-2.0 |
shubhdev/edx-platform | common/djangoapps/track/shim.py | 71 | 6434 | """Map new event context values to old top-level field values. Ensures events can be parsed by legacy parsers."""
import json
import logging
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
log = logging.getLogger(__name__)
CONTEXT_FIELDS_TO_INCLUDE = [
'username',
'session',
'ip',
'agent',
'host',
'referer',
'accept_language'
]
class LegacyFieldMappingProcessor(object):
"""Ensures all required fields are included in emitted events"""
def __call__(self, event):
context = event.get('context', {})
if 'context' in event:
for field in CONTEXT_FIELDS_TO_INCLUDE:
self.move_from_context(field, event)
remove_shim_context(event)
if 'data' in event:
if context.get('event_source', '') == 'browser' and isinstance(event['data'], dict):
event['event'] = json.dumps(event['data'])
else:
event['event'] = event['data']
del event['data']
else:
event['event'] = {}
if 'timestamp' in context:
event['time'] = context['timestamp']
del context['timestamp']
elif 'timestamp' in event:
event['time'] = event['timestamp']
if 'timestamp' in event:
del event['timestamp']
self.move_from_context('event_type', event, event.get('name', ''))
self.move_from_context('event_source', event, 'server')
self.move_from_context('page', event, None)
def move_from_context(self, field, event, default_value=''):
"""Move a field from the context to the top level of the event."""
context = event.get('context', {})
if field in context:
event[field] = context[field]
del context[field]
else:
event[field] = default_value
def remove_shim_context(event):
if 'context' in event:
context = event['context']
# These fields are present elsewhere in the event at this point
context_fields_to_remove = set(CONTEXT_FIELDS_TO_INCLUDE)
# This field is only used for Segment.io web analytics and does not concern researchers
context_fields_to_remove.add('client_id')
for field in context_fields_to_remove:
if field in context:
del context[field]
NAME_TO_EVENT_TYPE_MAP = {
'edx.video.played': 'play_video',
'edx.video.paused': 'pause_video',
'edx.video.stopped': 'stop_video',
'edx.video.loaded': 'load_video',
'edx.video.position.changed': 'seek_video',
'edx.video.seeked': 'seek_video',
'edx.video.transcript.shown': 'show_transcript',
'edx.video.transcript.hidden': 'hide_transcript',
}
class VideoEventProcessor(object):
"""
Converts new format video events into the legacy video event format.
Mobile devices cannot actually emit events that exactly match their counterparts emitted by the LMS javascript
video player. Instead of attempting to get them to do that, we insert a shim here that takes the events
they *can* easily emit and converts them into the legacy format.
TODO: Remove this shim and perform the conversion as part of some batch canonicalization process.
"""
def __call__(self, event):
name = event.get('name')
if not name:
return
if name not in NAME_TO_EVENT_TYPE_MAP:
return
# Convert edx.video.seeked to edx.video.position.changed because edx.video.seeked was not intended to actually
# ever be emitted.
if name == "edx.video.seeked":
event['name'] = "edx.video.position.changed"
event['event_type'] = NAME_TO_EVENT_TYPE_MAP[name]
if 'event' not in event:
return
payload = event['event']
if 'module_id' in payload:
module_id = payload['module_id']
try:
usage_key = UsageKey.from_string(module_id)
except InvalidKeyError:
log.warning('Unable to parse module_id "%s"', module_id, exc_info=True)
else:
payload['id'] = usage_key.html_id()
del payload['module_id']
if 'current_time' in payload:
payload['currentTime'] = payload.pop('current_time')
if 'context' in event:
context = event['context']
# Converts seek_type to seek and skip|slide to onSlideSeek|onSkipSeek
if 'seek_type' in payload:
seek_type = payload['seek_type']
if seek_type == 'slide':
payload['type'] = "onSlideSeek"
elif seek_type == 'skip':
payload['type'] = "onSkipSeek"
del payload['seek_type']
# For the iOS build that reports +30 for a 30-second backward skip
if (
context['application']['version'] == "1.0.02" and
context['application']['name'] == "edx.mobileapp.iOS"
):
if 'requested_skip_interval' in payload and 'type' in payload:
if (
payload['requested_skip_interval'] == 30 and
payload['type'] == "onSkipSeek"
):
payload['requested_skip_interval'] = -30
# For the Android build that isn't distinguishing between skip/seek
if 'requested_skip_interval' in payload:
if abs(payload['requested_skip_interval']) != 30:
if 'type' in payload:
payload['type'] = 'onSlideSeek'
if 'open_in_browser_url' in context:
page, _sep, _tail = context.pop('open_in_browser_url').rpartition('/')
event['page'] = page
event['event'] = json.dumps(payload)
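# Illustrative example (hypothetical field values, not taken from real traffic):
# a mobile event named "edx.video.seeked" whose context identifies the mobile app
# (and is not the special-cased iOS 1.0.02 build), with payload
#   {"module_id": "block-v1:Org+Course+Run+type@video+block@intro",
#    "seek_type": "skip", "requested_skip_interval": 30}
# leaves this processor with name "edx.video.position.changed", event_type
# "seek_video", and a JSON-encoded payload in which module_id is replaced by the
# usage key's html id and seek_type "skip" becomes type "onSkipSeek".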
class GoogleAnalyticsProcessor(object):
"""Adds course_id as label, and sets nonInteraction property"""
# documentation of fields here: https://segment.com/docs/integrations/google-analytics/
# this should *only* be used on events destined for segment.com and eventually google analytics
def __call__(self, event):
context = event.get('context', {})
course_id = context.get('course_id')
if course_id is not None:
event['label'] = course_id
event['nonInteraction'] = 1
| agpl-3.0 |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/plat-sunos5/CDIO.py | 191 | 1872 | # Generated by h2py from /usr/include/sys/cdio.h
CDROM_LBA = 0x01
CDROM_MSF = 0x02
CDROM_DATA_TRACK = 0x04
CDROM_LEADOUT = 0xAA
CDROM_AUDIO_INVALID = 0x00
CDROM_AUDIO_PLAY = 0x11
CDROM_AUDIO_PAUSED = 0x12
CDROM_AUDIO_COMPLETED = 0x13
CDROM_AUDIO_ERROR = 0x14
CDROM_AUDIO_NO_STATUS = 0x15
CDROM_DA_NO_SUBCODE = 0x00
CDROM_DA_SUBQ = 0x01
CDROM_DA_ALL_SUBCODE = 0x02
CDROM_DA_SUBCODE_ONLY = 0x03
CDROM_XA_DATA = 0x00
CDROM_XA_SECTOR_DATA = 0x01
CDROM_XA_DATA_W_ERROR = 0x02
CDROM_BLK_512 = 512
CDROM_BLK_1024 = 1024
CDROM_BLK_2048 = 2048
CDROM_BLK_2056 = 2056
CDROM_BLK_2336 = 2336
CDROM_BLK_2340 = 2340
CDROM_BLK_2352 = 2352
CDROM_BLK_2368 = 2368
CDROM_BLK_2448 = 2448
CDROM_BLK_2646 = 2646
CDROM_BLK_2647 = 2647
CDROM_BLK_SUBCODE = 96
CDROM_NORMAL_SPEED = 0x00
CDROM_DOUBLE_SPEED = 0x01
CDROM_QUAD_SPEED = 0x03
CDROM_TWELVE_SPEED = 0x0C
CDROM_MAXIMUM_SPEED = 0xff
CDIOC = (0x04 << 8)
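# CDIOC puts the CD-ROM ioctl group (0x04) in the high byte; each command below
# ORs in its own id in the low byte.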
CDROMPAUSE = (CDIOC|151)
CDROMRESUME = (CDIOC|152)
CDROMPLAYMSF = (CDIOC|153)
CDROMPLAYTRKIND = (CDIOC|154)
CDROMREADTOCHDR = (CDIOC|155)
CDROMREADTOCENTRY = (CDIOC|156)
CDROMSTOP = (CDIOC|157)
CDROMSTART = (CDIOC|158)
CDROMEJECT = (CDIOC|159)
CDROMVOLCTRL = (CDIOC|160)
CDROMSUBCHNL = (CDIOC|161)
CDROMREADMODE2 = (CDIOC|162)
CDROMREADMODE1 = (CDIOC|163)
CDROMREADOFFSET = (CDIOC|164)
CDROMGBLKMODE = (CDIOC|165)
CDROMSBLKMODE = (CDIOC|166)
CDROMCDDA = (CDIOC|167)
CDROMCDXA = (CDIOC|168)
CDROMSUBCODE = (CDIOC|169)
CDROMGDRVSPEED = (CDIOC|170)
CDROMSDRVSPEED = (CDIOC|171)
SCMD_READ_TOC = 0x43
SCMD_PLAYAUDIO_MSF = 0x47
SCMD_PLAYAUDIO_TI = 0x48
SCMD_PAUSE_RESUME = 0x4B
SCMD_READ_SUBCHANNEL = 0x42
SCMD_PLAYAUDIO10 = 0x45
SCMD_PLAYTRACK_REL10 = 0x49
SCMD_READ_HEADER = 0x44
SCMD_PLAYAUDIO12 = 0xA5
SCMD_PLAYTRACK_REL12 = 0xA9
SCMD_CD_PLAYBACK_CONTROL = 0xC9
SCMD_CD_PLAYBACK_STATUS = 0xC4
SCMD_READ_CDDA = 0xD8
SCMD_READ_CDXA = 0xDB
SCMD_READ_ALL_SUBCODES = 0xDF
CDROM_MODE2_SIZE = 2336
| apache-2.0 |
Nihn/Diamond-1 | src/diamond/utils/scheduler.py | 13 | 3655 | # coding=utf-8
import time
import math
import multiprocessing
import os
import random
import sys
import signal
try:
from setproctitle import getproctitle, setproctitle
except ImportError:
setproctitle = None
from diamond.utils.signals import signal_to_exception
from diamond.utils.signals import SIGALRMException
from diamond.utils.signals import SIGHUPException
def collector_process(collector, metric_queue, log):
"""
"""
proc = multiprocessing.current_process()
if setproctitle:
setproctitle('%s - %s' % (getproctitle(), proc.name))
signal.signal(signal.SIGALRM, signal_to_exception)
signal.signal(signal.SIGHUP, signal_to_exception)
signal.signal(signal.SIGUSR2, signal_to_exception)
interval = float(collector.config['interval'])
log.debug('Starting')
log.debug('Interval: %s seconds', interval)
# Validate the interval
if interval <= 0:
log.critical('interval of %s is not valid!', interval)
sys.exit(1)
# Start the next execution at the next window plus some stagger delay to
# avoid having all collectors running at the same time
next_window = math.floor(time.time() / interval) * interval
stagger_offset = random.uniform(0, interval - 1)
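# e.g. with interval=60 and time.time()=1000005.0:
#   next_window = floor(1000005 / 60) * 60 = 999960 (the start of the current
#   60s window) and stagger_offset is a random delay in [0, 59), so collectors
#   on the same host do not all fire at the same moment.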
# Allocate time till the end of the window for the collector to run. With a
# minimum of 1 second
max_time = int(max(interval - stagger_offset, 1))
log.debug('Max collection time: %s seconds', max_time)
# Set up stderr/stdout as /dev/null so random print statements in third
# party libs do not fail and prevent collectors from running.
# https://github.com/BrightcoveOS/Diamond/issues/722
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
while(True):
try:
time_to_sleep = (next_window + stagger_offset) - time.time()
if time_to_sleep > 0:
time.sleep(time_to_sleep)
elif time_to_sleep < 0:
# clock has jumped, lets skip missed intervals
next_window = time.time()
next_window += interval
# Ensure collector run times fit into the collection window
signal.alarm(max_time)
# Collect!
collector._run()
# Success! Disable the alarm
signal.alarm(0)
except SIGALRMException:
log.error('Took too long to run! Killed!')
# Adjust the stagger_offset to allow for more time to run the
# collector
stagger_offset = stagger_offset * 0.9
max_time = int(max(interval - stagger_offset, 1))
log.debug('Max collection time: %s seconds', max_time)
except SIGHUPException:
# Reload the config if requested
# We must first disable the alarm as we don't want it to interrupt
# us and end up with half a loaded config
signal.alarm(0)
log.info('Reloading config due to HUP')
collector.load_config()
log.info('Config reloaded')
except Exception:
log.exception('Collector failed!')
break
def handler_process(handlers, metric_queue, log):
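"""Pull batches of metrics off the shared queue and hand each metric to
every configured handler, flushing the handlers after each batch."""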
proc = multiprocessing.current_process()
if setproctitle:
setproctitle('%s - %s' % (getproctitle(), proc.name))
log.debug('Starting process %s', proc.name)
while(True):
metrics = metric_queue.get(block=True, timeout=None)
for metric in metrics:
for handler in handlers:
handler._process(metric)
for handler in handlers:
handler._flush()
| mit |
onitake/ansible | lib/ansible/modules/cloud/azure/azure_rm_aks_facts.py | 37 | 3911 | #!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_aks_facts
version_added: "2.6"
short_description: Get Azure Kubernetes Service facts.
description:
- Get facts for a specific Azure Kubernetes Service or all Azure Kubernetes Services.
options:
name:
description:
- Limit results to a specific Azure Kubernetes Service by name.
resource_group:
description:
- The resource group to search for the desired Azure Kubernetes Service
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Get facts for one Azure Kubernetes Service
azure_rm_aks_facts:
name: Testing
resource_group: TestRG
- name: Get facts for all Azure Kubernetes Services
azure_rm_aks_facts:
- name: Get facts by tags
azure_rm_aks_facts:
tags:
- testing
'''
RETURN = '''
azure_aks:
description: List of Azure Kubernetes Service dicts.
returned: always
type: list
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureHttpError
except:
# handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'managedClusters'
class AzureRMManagedClusterFacts(AzureRMModuleBase):
"""Utility class to get Azure Kubernetes Service facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
aks=[]
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMManagedClusterFacts, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
for key in self.module_args:
setattr(self, key, kwargs[key])
self.results['aks'] = (
self.get_item() if self.name
else self.list_items()
)
return self.results
def get_item(self):
"""Get a single Azure Kubernetes Service"""
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.containerservice_client.managed_clusters.get(
self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return result
def list_items(self):
"""Get all Azure Kubernetes Services"""
self.log('List all Azure Kubernetes Services')
try:
response = self.containerservice_client.managed_clusters.list(
self.resource_group)
except AzureHttpError as exc:
self.fail('Failed to list all items - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
"""Main module execution code path"""
AzureRMManagedClusterFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
nju520/django | django/conf/locale/it/formats.py | 504 | 2079 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y' # 25 Ottobre 2006
TIME_FORMAT = 'H:i' # 14:30
DATETIME_FORMAT = 'l d F Y H:i' # Mercoledì 25 Ottobre 2006 14:30
YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F' # 25/Ottobre
SHORT_DATE_FORMAT = 'd/m/Y' # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/m/Y H:i' # 25/10/2009 14:30
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%Y/%m/%d', # '25/10/2006', '2008/10/25'
'%d-%m-%Y', '%Y-%m-%d', # '25-10-2006', '2008-10-25'
'%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M:%S.%f', # '25-10-2006 14:30:59.000200'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59'
'%d-%m-%y %H:%M:%S.%f', # '25-10-06 14:30:59.000200'
'%d-%m-%y %H:%M', # '25-10-06 14:30'
'%d-%m-%y', # '25-10-06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
drawks/ansible | test/units/modules/network/slxos/test_slxos_lldp.py | 38 | 3087 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from units.compat.mock import patch
from units.modules.utils import set_module_args
from ansible.modules.network.slxos import slxos_lldp
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosLldpModule(TestSlxosModule):
module = slxos_lldp
def setUp(self):
super(TestSlxosLldpModule, self).setUp()
self._patch_get_config = patch(
'ansible.modules.network.slxos.slxos_lldp.get_config'
)
self._patch_load_config = patch(
'ansible.modules.network.slxos.slxos_lldp.load_config'
)
self._get_config = self._patch_get_config.start()
self._load_config = self._patch_load_config.start()
def tearDown(self):
super(TestSlxosLldpModule, self).tearDown()
self._patch_get_config.stop()
self._patch_load_config.stop()
def load_fixtures(self, commands=None):
config_file = 'slxos_config_config.cfg'
self._get_config.return_value = load_fixture(config_file)
self._load_config.return_value = None
def test_slxos_lldp_present(self, *args, **kwargs):
set_module_args(dict(
state='present'
))
result = self.execute_module(changed=True)
self.assertEqual(
result,
{
'commands': [
'protocol lldp',
'no disable'
],
'changed': True
}
)
def test_slxos_lldp_absent(self, *args, **kwargs):
set_module_args(dict(
state='absent'
))
result = self.execute_module()
self.assertEqual(
result,
{
'commands': [],
'changed': False
}
)
def test_slxos_lldp_invalid_argument(self, *args, **kwargs):
set_module_args(dict(
state='absent',
shawshank='Redemption'
))
result = self.execute_module(failed=True)
self.assertEqual(result['failed'], True)
self.assertTrue(re.match(
r'Unsupported parameters for \((basic.py|basic.pyc)\) module: '
'shawshank Supported parameters include: state',
result['msg']
), 'Output did not match. Got: %s' % result['msg'])
| gpl-3.0 |
apurvbhartia/gnuradio-routing | gr-howto-write-a-block-cmake/docs/doxygen/doxyxml/doxyindex.py | 19 | 6638 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Classes providing more user-friendly interfaces to the doxygen xml
docs than the generated classes provide.
"""
import os
from generated import index
from base import Base
from text import description
class DoxyIndex(Base):
"""
Parses a doxygen xml directory.
"""
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyIndex, self)._parse()
self._root = index.parse(os.path.join(self._xml_path, 'index.xml'))
for mem in self._root.compound:
converted = self.convert_mem(mem)
# For files we want the contents to be accessible directly
# from the parent rather than having to go through the file
# object.
if self.get_cls(mem) == DoxyFile:
if mem.name.endswith('.h'):
self._members += converted.members()
self._members.append(converted)
else:
self._members.append(converted)
def generate_swig_doc_i(self):
"""
%feature("docstring") gr_make_align_on_samplenumbers_ss::align_state "
Wraps the C++: gr_align_on_samplenumbers_ss::align_state";
"""
pass
class DoxyCompMem(Base):
kind = None
def __init__(self, *args, **kwargs):
super(DoxyCompMem, self).__init__(*args, **kwargs)
@classmethod
def can_parse(cls, obj):
return obj.kind == cls.kind
def set_descriptions(self, parse_data):
bd = description(getattr(parse_data, 'briefdescription', None))
dd = description(getattr(parse_data, 'detaileddescription', None))
self._data['brief_description'] = bd
self._data['detailed_description'] = dd
class DoxyCompound(DoxyCompMem):
pass
class DoxyMember(DoxyCompMem):
pass
class DoxyFunction(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'function'
def _parse(self):
if self._parsed:
return
super(DoxyFunction, self)._parse()
self.set_descriptions(self._parse_data)
self._data['params'] = []
prms = self._parse_data.param
for prm in prms:
self._data['params'].append(DoxyParam(prm))
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
params = property(lambda self: self.data()['params'])
Base.mem_classes.append(DoxyFunction)
class DoxyParam(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
def _parse(self):
if self._parsed:
return
super(DoxyParam, self)._parse()
self.set_descriptions(self._parse_data)
self._data['declname'] = self._parse_data.declname
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
declname = property(lambda self: self.data()['declname'])
class DoxyClass(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'class'
def _parse(self):
if self._parsed:
return
super(DoxyClass, self)._parse()
self.retrieve_data()
if self._error:
return
self.set_descriptions(self._retrieved_data.compounddef)
# Sectiondef.kind tells about whether private or public.
# We just ignore this for now.
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
Base.mem_classes.append(DoxyClass)
class DoxyFile(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'file'
def _parse(self):
if self._parsed:
return
super(DoxyFile, self)._parse()
self.retrieve_data()
self.set_descriptions(self._retrieved_data.compounddef)
if self._error:
return
self.process_memberdefs()
brief_description = property(lambda self: self.data()['brief_description'])
detailed_description = property(lambda self: self.data()['detailed_description'])
Base.mem_classes.append(DoxyFile)
class DoxyNamespace(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'namespace'
Base.mem_classes.append(DoxyNamespace)
class DoxyGroup(DoxyCompound):
__module__ = "gnuradio.utils.doxyxml"
kind = 'group'
def _parse(self):
if self._parsed:
return
super(DoxyGroup, self)._parse()
self.retrieve_data()
if self._error:
return
cdef = self._retrieved_data.compounddef
self._data['title'] = description(cdef.title)
# Process inner groups
grps = cdef.innergroup
for grp in grps:
converted = DoxyGroup.from_refid(grp.refid, top=self.top)
self._members.append(converted)
# Process inner classes
klasses = cdef.innerclass
for kls in klasses:
converted = DoxyClass.from_refid(kls.refid, top=self.top)
self._members.append(converted)
# Process normal members
self.process_memberdefs()
title = property(lambda self: self.data()['title'])
Base.mem_classes.append(DoxyGroup)
class DoxyFriend(DoxyMember):
__module__ = "gnuradio.utils.doxyxml"
kind = 'friend'
Base.mem_classes.append(DoxyFriend)
class DoxyOther(Base):
__module__ = "gnuradio.utils.doxyxml"
kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page'])
@classmethod
def can_parse(cls, obj):
return obj.kind in cls.kinds
Base.mem_classes.append(DoxyOther)
| gpl-3.0 |
rajul/tvb-library | tvb/datatypes/region_mapping_framework.py | 1 | 4261 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
DataTypes for mapping some TVB DataTypes to a Connectivity (regions).
.. moduleauthor:: Lia Domide <[email protected]>
.. moduleauthor:: Mihai Andrei <[email protected]>
"""
import numpy
import tvb.basic.traits.exceptions as exceptions
from tvb.basic.logger.builder import get_logger
from tvb.datatypes.region_mapping_data import RegionMappingData, RegionVolumeMappingData
LOG = get_logger(__name__)
class RegionMappingFramework(RegionMappingData):
"""
Framework methods regarding RegionMapping DataType.
"""
__tablename__ = None
def get_region_mapping_slice(self, start_idx, end_idx):
"""
Get a slice of the region mapping as used by the region viewers.
For each vertex on the surface, alpha-indices will be the closest
region-index
:param start_idx: vertex index on the surface
:param end_idx: vertex index on the surface
:return: NumPy array with [colosest_reg_idx ...]
"""
if isinstance(start_idx, (str, unicode)):
start_idx = int(start_idx)
if isinstance(end_idx, (str, unicode)):
end_idx = int(end_idx)
return self.array_data[start_idx: end_idx].T
def generate_new_region_mapping(self, connectivity_gid, storage_path):
"""
Generate a new region mapping with the given connectivity gid from an
existing mapping corresponding to the parent connectivity.
"""
new_region_map = self.__class__()
new_region_map.storage_path = storage_path
new_region_map._connectivity = connectivity_gid
new_region_map._surface = self._surface
new_region_map.array_data = self.array_data
return new_region_map
class RegionVolumeMappingFramework(RegionVolumeMappingData):
"""
Framework methods regarding RegionVolumeMapping DataType.
"""
__tablename__ = None
apply_corrections = True
def write_data_slice(self, data):
"""
We are using here the same signature as in TS, just to allow easier parsing code.
This method will also validate the data range and convert it to int, along with writing it to H5.
:param data: 3D int array
"""
LOG.info("Writing RegionVolumeMapping with min=%d, mix=%d" % (data.min(), data.max()))
if self.apply_corrections:
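# The incoming volume data is assumed to carry 1-based region indices: shift
# them to 0-based and flag voxels outside the connectivity as -1 (unmapped).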
data = numpy.array(data, dtype=numpy.int32)
data = data - 1
data[data >= self.connectivity.number_of_regions] = -1
LOG.debug("After corrections: RegionVolumeMapping min=%d, mix=%d" % (data.min(), data.max()))
if data.min() < -1 or data.max() >= self.connectivity.number_of_regions:
raise exceptions.ValidationException("Invalid Mapping array: [%d ... %d]" % (data.min(), data.max()))
self.store_data("array_data", data) | gpl-2.0 |
danielpalomino/gem5 | src/arch/x86/isa/insts/general_purpose/data_transfer/move.py | 44 | 9098 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
#
# Regular moves
#
def macroop MOV_R_MI {
limm t1, imm, dataSize=asz
ld reg, seg, [1, t0, t1]
};
def macroop MOV_MI_R {
limm t1, imm, dataSize=asz
st reg, seg, [1, t0, t1]
};
def macroop MOV_R_R {
mov reg, reg, regm
};
def macroop MOV_M_R {
st reg, seg, sib, disp
};
def macroop MOV_P_R {
rdip t7
st reg, seg, riprel, disp
};
def macroop MOV_R_M {
ld reg, seg, sib, disp
};
def macroop MOV_R_P {
rdip t7
ld reg, seg, riprel, disp
};
def macroop MOV_R_I {
limm reg, imm
};
def macroop MOV_M_I {
limm t1, imm
st t1, seg, sib, disp
};
def macroop MOV_P_I {
rdip t7
limm t1, imm
st t1, seg, riprel, disp
};
#
# Sign extending moves
#
def macroop MOVSXD_R_R {
sexti reg, regm, 31
};
def macroop MOVSXD_R_M {
ld t1, seg, sib, disp, dataSize=4
sexti reg, t1, 31
};
def macroop MOVSXD_R_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=4
sexti reg, t1, 31
};
def macroop MOVSX_B_R_R {
mov t1, t1, regm, dataSize=1
sexti reg, t1, 7
};
def macroop MOVSX_B_R_M {
ld t1, seg, sib, disp, dataSize=1
sexti reg, t1, 7
};
def macroop MOVSX_B_R_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=1
sexti reg, t1, 7
};
def macroop MOVSX_W_R_R {
sexti reg, regm, 15
};
def macroop MOVSX_W_R_M {
ld reg, seg, sib, disp, dataSize=2
sexti reg, reg, 15
};
def macroop MOVSX_W_R_P {
rdip t7
ld reg, seg, riprel, disp, dataSize=2
sexti reg, reg, 15
};
#
# Zero extending moves
#
def macroop MOVZX_B_R_R {
mov t1, t1, regm, dataSize=1
zexti reg, t1, 7
};
def macroop MOVZX_B_R_M {
ld t1, seg, sib, disp, dataSize=1
zexti reg, t1, 7
};
def macroop MOVZX_B_R_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=1
zexti reg, t1, 7
};
def macroop MOVZX_W_R_R {
zexti reg, regm, 15
};
def macroop MOVZX_W_R_M {
ld t1, seg, sib, disp, dataSize=2
zexti reg, t1, 15
};
def macroop MOVZX_W_R_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=2
zexti reg, t1, 15
};
def macroop MOV_C_R {
.serializing
.adjust_env maxOsz
wrcr reg, regm
};
def macroop MOV_R_C {
.serializing
.adjust_env maxOsz
rdcr reg, regm
};
def macroop MOV_D_R {
.serializing
.adjust_env maxOsz
wrdr reg, regm
};
def macroop MOV_R_D {
.adjust_env maxOsz
rddr reg, regm
};
def macroop MOV_R_S {
rdsel reg, regm
};
def macroop MOV_M_S {
rdsel t1, reg
st t1, seg, sib, disp, dataSize=2
};
def macroop MOV_P_S {
rdip t7
rdsel t1, reg
st t1, seg, riprel, disp, dataSize=2
};
def macroop MOV_REAL_S_R {
zexti t2, regm, 15, dataSize=8
slli t3, t2, 4, dataSize=8
wrsel reg, regm
wrbase reg, t3
};
def macroop MOV_REAL_S_M {
ld t1, seg, sib, disp, dataSize=2
zexti t2, t1, 15, dataSize=8
slli t3, t2, 4, dataSize=8
wrsel reg, t1
wrbase reg, t3
};
def macroop MOV_REAL_S_P {
panic "RIP relative addressing shouldn't happen in real mode"
};
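# Protected-mode segment loads below: bit 2 of the selector (the TI bit) picks
# the local (tsl) vs global (tsg) descriptor table, the 8-byte descriptor is
# fetched and checked with "chks", and then the hidden descriptor state and the
# visible selector are updated with "wrdl"/"wrsel".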
def macroop MOV_S_R {
andi t0, regm, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, regm, 0xF8, dataSize=8
andi t0, regm, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks regm, t3, dataSize=8
wrdl reg, t3, regm
wrsel reg, regm
};
def macroop MOV_S_M {
ld t1, seg, sib, disp, dataSize=2
andi t0, t1, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, t1, 0xF8, dataSize=8
andi t0, t1, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks t1, t3, dataSize=8
wrdl reg, t3, t1
wrsel reg, t1
};
def macroop MOV_S_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=2
andi t0, t1, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, t1, 0xF8, dataSize=8
andi t0, t1, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks t1, t3, dataSize=8
wrdl reg, t3, t1
wrsel reg, t1
};
def macroop MOVSS_S_R {
andi t0, regm, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, regm, 0xF8, dataSize=8
andi t0, regm, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks regm, t3, SSCheck, dataSize=8
wrdl reg, t3, regm
wrsel reg, regm
};
def macroop MOVSS_S_M {
ld t1, seg, sib, disp, dataSize=2
andi t0, t1, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, t1, 0xF8, dataSize=8
andi t0, t1, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks t1, t3, SSCheck, dataSize=8
wrdl reg, t3, t1
wrsel reg, t1
};
def macroop MOVSS_S_P {
rdip t7
ld t1, seg, riprel, disp, dataSize=2
andi t0, t1, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t2, t1, 0xF8, dataSize=8
andi t0, t1, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t2], dataSize=8, addressSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t2], dataSize=8, addressSize=8
processDescriptor:
chks t1, t3, SSCheck, dataSize=8
wrdl reg, t3, t1
wrsel reg, t1
};
def macroop MOVNTI_M_R {
st reg, seg, sib, disp
};
def macroop MOVNTI_P_R {
rdip t7
st reg, seg, riprel, disp
};
def macroop MOVD_XMM_R {
mov2fp xmml, regm, srcSize=dsz, destSize=8
lfpimm xmmh, 0
};
def macroop MOVD_XMM_M {
ldfp xmml, seg, sib, disp, dataSize=dsz
lfpimm xmmh, 0
};
def macroop MOVD_XMM_P {
rdip t7
ldfp xmml, seg, riprel, disp, dataSize=dsz
lfpimm xmmh, 0
};
def macroop MOVD_R_XMM {
mov2int reg, xmmlm, size=dsz
};
def macroop MOVD_M_XMM {
stfp xmml, seg, sib, disp, dataSize=dsz
};
def macroop MOVD_P_XMM {
rdip t7
stfp xmml, seg, riprel, disp, dataSize=dsz
};
'''
#let {{
# class MOVD(Inst):
# "GenFault ${new UnimpInstFault}"
#}};
| bsd-3-clause |
shingonoide/odoo | addons/survey/survey.py | 36 | 58953 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and / or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DF
from openerp.addons.website.models.website import slug
from urlparse import urljoin
from itertools import product
from collections import Counter
from collections import OrderedDict
import datetime
import logging
import re
import uuid
_logger = logging.getLogger(__name__)
class survey_stage(osv.Model):
"""Stages for Kanban view of surveys"""
_name = 'survey.stage'
_description = 'Survey Stage'
_order = 'sequence,id'
_columns = {
'name': fields.char(string="Name", required=True, translate=True),
'sequence': fields.integer(string="Sequence"),
'closed': fields.boolean(string="Closed", help="If closed, people won't be able to answer to surveys in this column."),
'fold': fields.boolean(string="Folded in kanban view")
}
_defaults = {
'sequence': 1,
'closed': False
}
_sql_constraints = [
('positive_sequence', 'CHECK(sequence >= 0)', 'Sequence number MUST be a natural number')
]
class survey_survey(osv.Model):
'''Settings for a multi-page/multi-question survey.
Each survey can have one or more attached pages, and each page can display
one or more questions.
'''
_name = 'survey.survey'
_description = 'Survey'
_rec_name = 'title'
_inherit = ['mail.thread', 'ir.needaction_mixin']
# Protected methods #
def _has_questions(self, cr, uid, ids, context=None):
""" Ensure that this survey has at least one page with at least one
question. """
for survey in self.browse(cr, uid, ids, context=context):
if not survey.page_ids or not [page.question_ids
for page in survey.page_ids if page.question_ids]:
return False
return True
## Function fields ##
def _is_designed(self, cr, uid, ids, name, arg, context=None):
res = dict()
for survey in self.browse(cr, uid, ids, context=context):
if not survey.page_ids or not [page.question_ids
for page in survey.page_ids if page.question_ids]:
res[survey.id] = False
else:
res[survey.id] = True
return res
def _get_tot_sent_survey(self, cr, uid, ids, name, arg, context=None):
""" Returns the number of invitations sent for this survey, be they
(partially) completed or not """
res = dict((id, 0) for id in ids)
sur_res_obj = self.pool.get('survey.user_input')
for id in ids:
res[id] = sur_res_obj.search(cr, uid, # SUPERUSER_ID,
[('survey_id', '=', id), ('type', '=', 'link')],
context=context, count=True)
return res
def _get_tot_start_survey(self, cr, uid, ids, name, arg, context=None):
""" Returns the number of started instances of this survey, be they
completed or not """
res = dict((id, 0) for id in ids)
sur_res_obj = self.pool.get('survey.user_input')
for id in ids:
res[id] = sur_res_obj.search(cr, uid, # SUPERUSER_ID,
['&', ('survey_id', '=', id), '|', ('state', '=', 'skip'), ('state', '=', 'done')],
context=context, count=True)
return res
def _get_tot_comp_survey(self, cr, uid, ids, name, arg, context=None):
""" Returns the number of completed instances of this survey """
res = dict((id, 0) for id in ids)
sur_res_obj = self.pool.get('survey.user_input')
for id in ids:
res[id] = sur_res_obj.search(cr, uid, # SUPERUSER_ID,
[('survey_id', '=', id), ('state', '=', 'done')],
context=context, count=True)
return res
def _get_public_url(self, cr, uid, ids, name, arg, context=None):
""" Computes a public URL for the survey """
if context and context.get('relative_url'):
base_url = '/'
else:
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
res = {}
for survey in self.browse(cr, uid, ids, context=context):
res[survey.id] = urljoin(base_url, "survey/start/%s" % slug(survey))
return res
def _get_public_url_html(self, cr, uid, ids, name, arg, context=None):
""" Computes a public URL for the survey (html-embeddable version)"""
urls = self._get_public_url(cr, uid, ids, name, arg, context=context)
for id, url in urls.iteritems():
urls[id] = '<a href="%s">%s</a>' % (url, _("Click here to start survey"))
return urls
def _get_print_url(self, cr, uid, ids, name, arg, context=None):
""" Computes a printing URL for the survey """
if context and context.get('relative_url'):
base_url = '/'
else:
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
res = {}
for survey in self.browse(cr, uid, ids, context=context):
res[survey.id] = urljoin(base_url, "survey/print/%s" % slug(survey))
return res
def _get_result_url(self, cr, uid, ids, name, arg, context=None):
""" Computes an URL for the survey results """
if context and context.get('relative_url'):
base_url = '/'
else:
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
res = {}
for survey in self.browse(cr, uid, ids, context=context):
res[survey.id] = urljoin(base_url, "survey/results/%s" % slug(survey))
return res
# Model fields #
_columns = {
'title': fields.char('Title', required=1, translate=True),
'res_model': fields.char('Category'),
'page_ids': fields.one2many('survey.page', 'survey_id', 'Pages', copy=True),
'stage_id': fields.many2one('survey.stage', string="Stage", ondelete="set null", copy=False),
'auth_required': fields.boolean('Login required',
help="Users with a public link will be requested to login before taking part to the survey",
oldname="authenticate"),
'users_can_go_back': fields.boolean('Users can go back',
help="If checked, users can go back to previous pages."),
'tot_sent_survey': fields.function(_get_tot_sent_survey,
string="Number of sent surveys", type="integer"),
'tot_start_survey': fields.function(_get_tot_start_survey,
string="Number of started surveys", type="integer"),
'tot_comp_survey': fields.function(_get_tot_comp_survey,
string="Number of completed surveys", type="integer"),
'description': fields.html('Description', translate=True,
oldname="description", help="A long description of the purpose of the survey"),
'color': fields.integer('Color Index'),
'user_input_ids': fields.one2many('survey.user_input', 'survey_id',
'User responses', readonly=1),
'designed': fields.function(_is_designed, string="Is designed?",
type="boolean"),
'public_url': fields.function(_get_public_url,
string="Public link", type="char"),
'public_url_html': fields.function(_get_public_url_html,
string="Public link (html version)", type="char"),
'print_url': fields.function(_get_print_url,
string="Print link", type="char"),
'result_url': fields.function(_get_result_url,
string="Results link", type="char"),
'email_template_id': fields.many2one('email.template',
'Email Template', ondelete='set null'),
'thank_you_message': fields.html('Thank you message', translate=True,
help="This message will be displayed when survey is completed"),
'quizz_mode': fields.boolean(string='Quiz mode')
}
def _default_stage(self, cr, uid, context=None):
ids = self.pool['survey.stage'].search(cr, uid, [], limit=1, context=context)
if ids:
return ids[0]
return False
_defaults = {
'color': 0,
'stage_id': lambda self, *a, **kw: self._default_stage(*a, **kw)
}
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
""" Read group customization in order to display all the stages in the
kanban view, even if they are empty """
stage_obj = self.pool.get('survey.stage')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
stage_ids = stage_obj._search(cr, uid, [], order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
_group_by_full = {
'stage_id': _read_group_stage_ids
}
# Public methods #
def copy_data(self, cr, uid, id, default=None, context=None):
current_rec = self.read(cr, uid, id, fields=['title'], context=context)
title = _("%s (copy)") % (current_rec.get('title'))
default = dict(default or {}, title=title)
return super(survey_survey, self).copy_data(cr, uid, id, default,
context=context)
def next_page(self, cr, uid, user_input, page_id, go_back=False, context=None):
'''The next page to display to the user, knowing that page_id is the id
of the last displayed page.
If page_id == 0, it will always return the first page of the survey.
If all the pages have been displayed and go_back == False, it will
return None
If go_back == True, it will return the *previous* page instead of the
next page.
.. note::
It is assumed here that a careful user will not try to set go_back
to True if she knows that the page to display is the first one!
(doing this will probably cause a giant worm to eat her house)'''
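# Example: for a survey with pages [p1, p2, p3], next_page(user_input, 0)
# returns (p1, 0, False); next_page(user_input, p2.id) returns (p3, 2, True)
# since p3 is the last page; and next_page(user_input, p3.id) returns
# (None, -1, False) once every page has been displayed.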
survey = user_input.survey_id
pages = list(enumerate(survey.page_ids))
# First page
if page_id == 0:
return (pages[0][1], 0, len(pages) == 1)
current_page_index = pages.index((filter(lambda p: p[1].id == page_id, pages))[0])
# All the pages have been displayed
if current_page_index == len(pages) - 1 and not go_back:
return (None, -1, False)
# Let's get back, baby!
elif go_back and survey.users_can_go_back:
return (pages[current_page_index - 1][1], current_page_index - 1, False)
else:
# This will show the last page
if current_page_index == len(pages) - 2:
return (pages[current_page_index + 1][1], current_page_index + 1, True)
# This will show a regular page
else:
return (pages[current_page_index + 1][1], current_page_index + 1, False)
def filter_input_ids(self, cr, uid, survey, filters, finished=False, context=None):
'''If the user applies any filters, this function returns the list of
filtered user_input_ids and the label strings used to display the data on the website.
:param filters: list of dictionaries (with keys: row_id, answer_id)
:param finished: True for completely filled surveys, False otherwise.
:returns list of filtered user_input_ids.
'''
context = context if context else {}
if filters:
input_line_obj = self.pool.get('survey.user_input_line')
domain_filter, choice, filter_display_data = [], [], []
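# The domain is assembled in Odoo's prefix (Polish) notation: each matrix
# filter appends "'|', (row condition), (answer condition)"; simple choice
# answers are gathered into one ('value_suggested.id', 'in', choice) leaf that
# is prepended when present, otherwise the leading '|' is dropped.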
for filter in filters:
row_id, answer_id = filter['row_id'], filter['answer_id']
if row_id == 0:
choice.append(answer_id)
else:
domain_filter.extend(['|', ('value_suggested_row.id', '=', row_id), ('value_suggested.id', '=', answer_id)])
if choice:
domain_filter.insert(0, ('value_suggested.id', 'in', choice))
else:
domain_filter = domain_filter[1:]
line_ids = input_line_obj.search(cr, uid, domain_filter, context=context)
filtered_input_ids = [input.user_input_id.id for input in input_line_obj.browse(cr, uid, line_ids, context=context)]
else:
filtered_input_ids, filter_display_data = [], []
if finished:
user_input = self.pool.get('survey.user_input')
if not filtered_input_ids:
current_filters = user_input.search(cr, uid, [('survey_id', '=', survey.id)], context=context)
user_input_objs = user_input.browse(cr, uid, current_filters, context=context)
else:
user_input_objs = user_input.browse(cr, uid, filtered_input_ids, context=context)
return [input.id for input in user_input_objs if input.state == 'done']
return filtered_input_ids
def get_filter_display_data(self, cr, uid, filters, context):
'''Returns data to display current filters
:param filters: list of dictionaries (with keys: row_id, answer_id)
:returns list of dicts with the data needed to display the current filters.
'''
filter_display_data = []
if filters:
question_obj = self.pool.get('survey.question')
label_obj = self.pool.get('survey.label')
for filter in filters:
row_id, answer_id = filter['row_id'], filter['answer_id']
question_id = label_obj.browse(cr, uid, answer_id, context=context).question_id.id
question = question_obj.browse(cr, uid, question_id, context=context)
if row_id == 0:
labels = label_obj.browse(cr, uid, [answer_id], context=context)
else:
labels = label_obj.browse(cr, uid, [row_id, answer_id], context=context)
filter_display_data.append({'question_text': question.question, 'labels': [label.value for label in labels]})
return filter_display_data
def prepare_result(self, cr, uid, question, current_filters=None, context=None):
''' Compute statistical data for questions by counting the number of votes per choice on the basis of the current filters '''
current_filters = current_filters if current_filters else []
context = context if context else {}
result_summary = {}
#Calculate and return statistics for choice
if question.type in ['simple_choice', 'multiple_choice']:
answers = {}
comments = []
[answers.update({label.id: {'text': label.value, 'count': 0, 'answer_id': label.id}}) for label in question.labels_ids]
for input_line in question.user_input_line_ids:
if input_line.answer_type == 'suggestion' and answers.get(input_line.value_suggested.id) and (not(current_filters) or input_line.user_input_id.id in current_filters):
answers[input_line.value_suggested.id]['count'] += 1
if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
comments.append(input_line)
result_summary = {'answers': answers.values(), 'comments': comments}
#Calculate and return statistics for matrix
if question.type == 'matrix':
rows = OrderedDict()
answers = OrderedDict()
res = dict()
comments = []
[rows.update({label.id: label.value}) for label in question.labels_ids_2]
[answers.update({label.id: label.value}) for label in question.labels_ids]
for cell in product(rows.keys(), answers.keys()):
res[cell] = 0
for input_line in question.user_input_line_ids:
if input_line.answer_type == 'suggestion' and (not(current_filters) or input_line.user_input_id.id in current_filters) and input_line.value_suggested_row:
res[(input_line.value_suggested_row.id, input_line.value_suggested.id)] += 1
if input_line.answer_type == 'text' and (not(current_filters) or input_line.user_input_id.id in current_filters):
comments.append(input_line)
result_summary = {'answers': answers, 'rows': rows, 'result': res, 'comments': comments}
#Calculate and return statistics for free_text, textbox, datetime
if question.type in ['free_text', 'textbox', 'datetime']:
result_summary = []
for input_line in question.user_input_line_ids:
if not(current_filters) or input_line.user_input_id.id in current_filters:
result_summary.append(input_line)
#Calculate and return statistics for numerical_box
if question.type == 'numerical_box':
result_summary = {'input_lines': []}
all_inputs = []
for input_line in question.user_input_line_ids:
if not(current_filters) or input_line.user_input_id.id in current_filters:
all_inputs.append(input_line.value_number)
result_summary['input_lines'].append(input_line)
if all_inputs:
result_summary.update({'average': round(sum(all_inputs) / len(all_inputs), 2),
'max': round(max(all_inputs), 2),
'min': round(min(all_inputs), 2),
'sum': sum(all_inputs),
'most_comman': Counter(all_inputs).most_common(5)})
return result_summary
def get_input_summary(self, cr, uid, question, current_filters=None, context=None):
''' Returns the overall summary of a question (e.g. answered, skipped, total_inputs) on the basis of the current filters '''
current_filters = current_filters if current_filters else []
context = context if context else {}
result = {}
if question.survey_id.user_input_ids:
total_input_ids = current_filters or [input_id.id for input_id in question.survey_id.user_input_ids if input_id.state != 'new']
result['total_inputs'] = len(total_input_ids)
question_input_ids = []
for user_input in question.user_input_line_ids:
if not user_input.skipped:
question_input_ids.append(user_input.user_input_id.id)
result['answered'] = len(set(question_input_ids) & set(total_input_ids))
result['skipped'] = result['total_inputs'] - result['answered']
return result
# Actions
def action_start_survey(self, cr, uid, ids, context=None):
''' Open the website page with the survey form '''
trail = ""
context = dict(context or {}, relative_url=True)
if 'survey_token' in context:
trail = "/" + context['survey_token']
return {
'type': 'ir.actions.act_url',
'name': "Start Survey",
'target': 'self',
'url': self.read(cr, uid, ids, ['public_url'], context=context)[0]['public_url'] + trail
}
def action_send_survey(self, cr, uid, ids, context=None):
''' Open a window to compose an email, pre-filled with the survey
message '''
if not self._has_questions(cr, uid, ids, context=None):
raise osv.except_osv(_('Error!'), _('You cannot send an invitation for a survey that has no questions.'))
survey_browse = self.pool.get('survey.survey').browse(cr, uid, ids,
context=context)[0]
if survey_browse.stage_id.closed:
raise osv.except_osv(_('Warning!'),
_("You cannot send invitations for closed surveys."))
assert len(ids) == 1, 'This option should only be used for a single \
survey at a time.'
ir_model_data = self.pool.get('ir.model.data')
templates = ir_model_data.get_object_reference(cr, uid,
'survey', 'email_template_survey')
template_id = templates[1] if len(templates) > 0 else False
ctx = dict(context)
ctx.update({'default_model': 'survey.survey',
'default_res_id': ids[0],
'default_survey_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment'}
)
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'survey.mail.compose.message',
'target': 'new',
'context': ctx,
}
def action_print_survey(self, cr, uid, ids, context=None):
''' Open the website page with the survey printable view '''
trail = ""
context = dict(context or {}, relative_url=True)
if 'survey_token' in context:
trail = "/" + context['survey_token']
return {
'type': 'ir.actions.act_url',
'name': "Print Survey",
'target': 'self',
'url': self.read(cr, uid, ids, ['print_url'], context=context)[0]['print_url'] + trail
}
def action_result_survey(self, cr, uid, ids, context=None):
''' Open the website page with the survey results view '''
context = dict(context or {}, relative_url=True)
return {
'type': 'ir.actions.act_url',
'name': "Results of the Survey",
'target': 'self',
'url': self.read(cr, uid, ids, ['result_url'], context=context)[0]['result_url']
}
def action_test_survey(self, cr, uid, ids, context=None):
''' Open the website page with the survey form into test mode'''
context = dict(context or {}, relative_url=True)
return {
'type': 'ir.actions.act_url',
'name': "Results of the Survey",
'target': 'self',
'url': self.read(cr, uid, ids, ['public_url'], context=context)[0]['public_url'] + "/phantom"
}
class survey_page(osv.Model):
'''A page for a survey.
Pages are essentially containers, allowing to group questions by ordered
screens.
.. note::
A page should be deleted if the survey it belongs to is deleted. '''
_name = 'survey.page'
_description = 'Survey Page'
_rec_name = 'title'
_order = 'sequence,id'
# Model Fields #
_columns = {
'title': fields.char('Page Title', required=1,
translate=True),
'survey_id': fields.many2one('survey.survey', 'Survey',
ondelete='cascade', required=True),
'question_ids': fields.one2many('survey.question', 'page_id',
'Questions', copy=True),
'sequence': fields.integer('Page number'),
'description': fields.html('Description',
help="An introductory text to your page", translate=True,
oldname="note"),
}
_defaults = {
'sequence': 10
}
# Public methods #
def copy_data(self, cr, uid, ids, default=None, context=None):
current_rec = self.read(cr, uid, ids, fields=['title'], context=context)
title = _("%s (copy)") % (current_rec.get('title'))
default = dict(default or {}, title=title)
return super(survey_page, self).copy_data(cr, uid, ids, default,
context=context)
class survey_question(osv.Model):
''' Questions that will be asked in a survey.
Each question can have one of more suggested answers (eg. in case of
dropdown choices, multi-answer checkboxes, radio buttons...).'''
_name = 'survey.question'
_description = 'Survey Question'
_rec_name = 'question'
_order = 'sequence,id'
# Model fields #
_columns = {
# Question metadata
'page_id': fields.many2one('survey.page', 'Survey page',
ondelete='cascade', required=1),
'survey_id': fields.related('page_id', 'survey_id', type='many2one',
relation='survey.survey', string='Survey'),
'sequence': fields.integer(string='Sequence'),
# Question
'question': fields.char('Question Name', required=1, translate=True),
'description': fields.html('Description', help="Use this field to add \
additional explanations about your question", translate=True,
oldname='descriptive_text'),
# Answer
'type': fields.selection([('free_text', 'Long Text Zone'),
('textbox', 'Text Input'),
('numerical_box', 'Numerical Value'),
('datetime', 'Date and Time'),
('simple_choice', 'Multiple choice: only one answer'),
('multiple_choice', 'Multiple choice: multiple answers allowed'),
('matrix', 'Matrix')], 'Type of Question', size=15, required=1),
'matrix_subtype': fields.selection([('simple', 'One choice per row'),
('multiple', 'Multiple choices per row')], 'Matrix Type'),
'labels_ids': fields.one2many('survey.label',
'question_id', 'Types of answers', oldname='answer_choice_ids', copy=True),
'labels_ids_2': fields.one2many('survey.label',
'question_id_2', 'Rows of the Matrix', copy=True),
# labels are used for proposed choices
# if question.type == simple choice | multiple choice
# -> only labels_ids is used
# if question.type == matrix
# -> labels_ids are the columns of the matrix
# -> labels_ids_2 are the rows of the matrix
# Display options
'column_nb': fields.selection([('12', '1'),
('6', '2'),
('4', '3'),
('3', '4'),
('2', '6')],
'Number of columns'),
# These options refer to col-xx-[12|6|4|3|2] classes in Bootstrap
'display_mode': fields.selection([('columns', 'Radio Buttons/Checkboxes'),
('dropdown', 'Selection Box')],
'Display mode'),
# Comments
'comments_allowed': fields.boolean('Show Comments Field',
oldname="allow_comment"),
'comments_message': fields.char('Comment Message', translate=True),
'comment_count_as_answer': fields.boolean('Comment Field is an Answer Choice',
oldname='make_comment_field'),
# Validation
'validation_required': fields.boolean('Validate entry',
oldname='is_validation_require'),
'validation_email': fields.boolean('Input must be an email'),
'validation_length_min': fields.integer('Minimum Text Length'),
'validation_length_max': fields.integer('Maximum Text Length'),
'validation_min_float_value': fields.float('Minimum value'),
'validation_max_float_value': fields.float('Maximum value'),
'validation_min_date': fields.datetime('Minimum Date'),
'validation_max_date': fields.datetime('Maximum Date'),
'validation_error_msg': fields.char('Error message',
oldname='validation_valid_err_msg',
translate=True),
# Constraints on number of answers (matrices)
'constr_mandatory': fields.boolean('Mandatory Answer',
oldname="is_require_answer"),
'constr_error_msg': fields.char("Error message",
oldname='req_error_msg', translate=True),
'user_input_line_ids': fields.one2many('survey.user_input_line',
'question_id', 'Answers',
domain=[('skipped', '=', False)]),
}
_defaults = {
'page_id': lambda self, cr, uid, context: context.get('page_id'),
'sequence': 10,
'type': 'free_text',
'matrix_subtype': 'simple',
'column_nb': '12',
'display_mode': 'columns',
'constr_error_msg': lambda s, cr, uid, c: _('This question requires an answer.'),
'validation_error_msg': lambda s, cr, uid, c: _('The answer you entered has an invalid format.'),
'validation_required': False,
'comments_message': lambda s, cr, uid, c: _('If other, precise:'),
}
_sql_constraints = [
('positive_len_min', 'CHECK (validation_length_min >= 0)', 'A length must be positive!'),
('positive_len_max', 'CHECK (validation_length_max >= 0)', 'A length must be positive!'),
('validation_length', 'CHECK (validation_length_min <= validation_length_max)', 'Max length cannot be smaller than min length!'),
('validation_float', 'CHECK (validation_min_float_value <= validation_max_float_value)', 'Max value cannot be smaller than min value!'),
('validation_date', 'CHECK (validation_min_date <= validation_max_date)', 'Max date cannot be smaller than min date!')
]
def copy_data(self, cr, uid, ids, default=None, context=None):
current_rec = self.read(cr, uid, ids, context=context)
question = _("%s (copy)") % (current_rec.get('question'))
default = dict(default or {}, question=question)
return super(survey_question, self).copy_data(cr, uid, ids, default,
context=context)
# Validation methods
def validate_question(self, cr, uid, question, post, answer_tag, context=None):
''' Validate question, depending on question type and parameters '''
try:
checker = getattr(self, 'validate_' + question.type)
except AttributeError:
_logger.warning(question.type + ": This type of question has no validation method")
return {}
else:
return checker(cr, uid, question, post, answer_tag, context=context)
def validate_free_text(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if question.constr_mandatory and not answer:
errors.update({answer_tag: question.constr_error_msg})
return errors
def validate_textbox(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if question.constr_mandatory and not answer:
errors.update({answer_tag: question.constr_error_msg})
# Email format validation
# Note: this validation is very basic:
# all the strings of the form
# <something>@<anything>.<extension>
# will be accepted
if answer and question.validation_email:
if not re.match(r"[^@]+@[^@]+\.[^@]+", answer):
errors.update({answer_tag: _('This answer must be an email address')})
# Answer validation (if properly defined)
# Length of the answer must be in a range
if answer and question.validation_required:
if not (question.validation_length_min <= len(answer) <= question.validation_length_max):
errors.update({answer_tag: question.validation_error_msg})
return errors
def validate_numerical_box(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if question.constr_mandatory and not answer:
errors.update({answer_tag: question.constr_error_msg})
# Checks if user input is a number
if answer:
try:
floatanswer = float(answer)
except ValueError:
errors.update({answer_tag: _('This is not a number')})
# Answer validation (if properly defined)
if answer and question.validation_required:
# Answer is not in the right range
try:
floatanswer = float(answer) # check that it is a float has been done hereunder
if not (question.validation_min_float_value <= floatanswer <= question.validation_max_float_value):
errors.update({answer_tag: question.validation_error_msg})
except ValueError:
pass
return errors
def validate_datetime(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
answer = post[answer_tag].strip()
# Empty answer to mandatory question
if question.constr_mandatory and not answer:
errors.update({answer_tag: question.constr_error_msg})
# Checks if user input is a datetime
if answer:
try:
dateanswer = datetime.datetime.strptime(answer, DF)
except ValueError:
errors.update({answer_tag: _('This is not a date/time')})
return errors
# Answer validation (if properly defined)
if answer and question.validation_required:
# Answer is not in the right range
try:
dateanswer = datetime.datetime.strptime(answer, DF)
if not (datetime.datetime.strptime(question.validation_min_date, DF) <= dateanswer <= datetime.datetime.strptime(question.validation_max_date, DF)):
errors.update({answer_tag: question.validation_error_msg})
except ValueError: # check that it is a datetime has been done hereunder
pass
return errors
def validate_simple_choice(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
if question.comments_allowed:
comment_tag = "%s_%s" % (answer_tag, 'comment')
# Empty answer to mandatory question
if question.constr_mandatory and not answer_tag in post:
errors.update({answer_tag: question.constr_error_msg})
if question.constr_mandatory and answer_tag in post and post[answer_tag].strip() == '':
errors.update({answer_tag: question.constr_error_msg})
# Answer is a comment and is empty
if question.constr_mandatory and answer_tag in post and post[answer_tag] == "-1" and question.comment_count_as_answer and comment_tag in post and not post[comment_tag].strip():
errors.update({answer_tag: question.constr_error_msg})
return errors
def validate_multiple_choice(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
if question.constr_mandatory:
answer_candidates = dict_keys_startswith(post, answer_tag)
comment_flag = answer_candidates.pop(("%s_%s" % (answer_tag, -1)), None)
if question.comments_allowed:
comment_answer = answer_candidates.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
# There is no answer neither comments (if comments count as answer)
if not answer_candidates and question.comment_count_as_answer and (not comment_flag or not comment_answer):
errors.update({answer_tag: question.constr_error_msg})
# There is no answer at all
if not answer_candidates and not question.comment_count_as_answer:
errors.update({answer_tag: question.constr_error_msg})
return errors
def validate_matrix(self, cr, uid, question, post, answer_tag, context=None):
errors = {}
if question.constr_mandatory:
lines_number = len(question.labels_ids_2)
answer_candidates = dict_keys_startswith(post, answer_tag)
comment_answer = answer_candidates.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
# Number of lines that have been answered
if question.matrix_subtype == 'simple':
answer_number = len(answer_candidates)
elif question.matrix_subtype == 'multiple':
answer_number = len(set([sk.rsplit('_', 1)[0] for sk in answer_candidates.keys()]))
else:
raise RuntimeError("Invalid matrix subtype")
# Validate that each line has been answered
if answer_number != lines_number:
errors.update({answer_tag: question.constr_error_msg})
return errors
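# Illustrative sketch (not part of the original module): how validate_matrix counts
# answered rows for the 'multiple' subtype. Posted keys look like
# "<answer_tag>_<row_id>_<col_id>"; stripping the trailing "_<col_id>" and taking the set
# yields one entry per answered row. The tags below are hypothetical.
def _example_matrix_rows_answered():
    answer_candidates = {'42_7_1': '1', '42_7_2': '1', '42_8_3': '1'}
    rows = set(key.rsplit('_', 1)[0] for key in answer_candidates)
    return len(rows)  # 2 distinct rows ('42_7' and '42_8') have at least one answer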
class survey_label(osv.Model):
''' A suggested answer for a question '''
_name = 'survey.label'
_rec_name = 'value'
_order = 'sequence,id'
_description = 'Survey Label'
    def _check_question_not_empty(self, cr, uid, ids, context=None):
        '''Ensure that field question_id XOR field question_id_2 is not null'''
        for label in self.browse(cr, uid, ids, context=context):
            # 'bool()' is required in order to make '!=' act as XOR with objects
            # Check every label in the recordset, not only the first one
            if bool(label.question_id) == bool(label.question_id_2):
                return False
        return True
_columns = {
'question_id': fields.many2one('survey.question', 'Question',
ondelete='cascade'),
'question_id_2': fields.many2one('survey.question', 'Question',
ondelete='cascade'),
'sequence': fields.integer('Label Sequence order'),
'value': fields.char("Suggested value", translate=True,
required=True),
'quizz_mark': fields.float('Score for this answer', help="A positive score indicates a correct answer; a negative or null score indicates a wrong answer"),
}
_defaults = {
'sequence': 10,
}
_constraints = [
(_check_question_not_empty, "A label must be attached to one and only one question", ['question_id', 'question_id_2'])
]
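# Illustrative sketch (not part of the original module): the constraint above relies on
# "bool(x) != bool(y)" acting as XOR, so a label is valid only when exactly one of the two
# question links is set.
def _example_label_xor(question_id=1, question_id_2=False):
    return bool(question_id) != bool(question_id_2)  # True here; False when both or neither are set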
class survey_user_input(osv.Model):
''' Metadata for a set of one user's answers to a particular survey '''
_name = "survey.user_input"
_rec_name = 'date_create'
_description = 'Survey User Input'
def _quizz_get_score(self, cr, uid, ids, name, args, context=None):
ret = dict()
for user_input in self.browse(cr, uid, ids, context=context):
ret[user_input.id] = sum([uil.quizz_mark for uil in user_input.user_input_line_ids] or [0.0])
return ret
_columns = {
'survey_id': fields.many2one('survey.survey', 'Survey', required=True,
readonly=1, ondelete='restrict'),
'date_create': fields.datetime('Creation Date', required=True,
readonly=1, copy=False),
'deadline': fields.datetime("Deadline",
help="Date by which the person can open the survey and submit answers",
oldname="date_deadline"),
'type': fields.selection([('manually', 'Manually'), ('link', 'Link')],
'Answer Type', required=1, readonly=1,
oldname="response_type"),
'state': fields.selection([('new', 'Not started yet'),
('skip', 'Partially completed'),
('done', 'Completed')],
'Status',
readonly=True),
'test_entry': fields.boolean('Test entry', readonly=1),
'token': fields.char("Identification token", readonly=1, required=1, copy=False),
# Optional Identification data
'partner_id': fields.many2one('res.partner', 'Partner', readonly=1),
'email': fields.char("E-mail", readonly=1),
# Displaying data
'last_displayed_page_id': fields.many2one('survey.page',
'Last displayed page'),
# The answers !
'user_input_line_ids': fields.one2many('survey.user_input_line',
'user_input_id', 'Answers', copy=True),
# URLs used to display the answers
'result_url': fields.related('survey_id', 'result_url', type='char',
string="Public link to the survey results"),
'print_url': fields.related('survey_id', 'print_url', type='char',
string="Public link to the empty survey"),
'quizz_score': fields.function(_quizz_get_score, type="float", string="Score for the quiz")
}
_defaults = {
'date_create': fields.datetime.now,
'type': 'manually',
'state': 'new',
'token': lambda s, cr, uid, c: uuid.uuid4().__str__(),
'quizz_score': 0.0,
}
_sql_constraints = [
('unique_token', 'UNIQUE (token)', 'A token must be unique!'),
('deadline_in_the_past', 'CHECK (deadline >= date_create)', 'The deadline cannot be in the past')
]
def do_clean_emptys(self, cr, uid, automatic=False, context=None):
''' Remove empty user inputs that have been created manually
(used as a cronjob declared in data/survey_cron.xml) '''
empty_user_input_ids = self.search(cr, uid, [('type', '=', 'manually'),
('state', '=', 'new'),
('date_create', '<', (datetime.datetime.now() - datetime.timedelta(hours=1)).strftime(DF))],
context=context)
if empty_user_input_ids:
self.unlink(cr, uid, empty_user_input_ids, context=context)
def action_survey_resent(self, cr, uid, ids, context=None):
        ''' Send the invitation again '''
record = self.browse(cr, uid, ids[0], context=context)
context = dict(context or {})
context.update({
'survey_resent_token': True,
'default_partner_ids': record.partner_id and [record.partner_id.id] or [],
'default_multi_email': record.email or "",
'default_public': 'email_private',
})
return self.pool.get('survey.survey').action_send_survey(cr, uid,
[record.survey_id.id], context=context)
def action_view_answers(self, cr, uid, ids, context=None):
''' Open the website page with the survey form '''
user_input = self.read(cr, uid, ids, ['print_url', 'token'], context=context)[0]
return {
'type': 'ir.actions.act_url',
'name': "View Answers",
'target': 'self',
'url': '%s/%s' % (user_input['print_url'], user_input['token'])
}
def action_survey_results(self, cr, uid, ids, context=None):
''' Open the website page with the survey results '''
return {
'type': 'ir.actions.act_url',
'name': "Survey Results",
'target': 'self',
'url': self.read(cr, uid, ids, ['result_url'], context=context)[0]['result_url']
}
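# Illustrative sketch (not part of the original module): the cutoff used by do_clean_emptys
# above - manually created inputs still in the 'new' state and older than one hour are
# deleted by the cron job. DF is the server datetime format alias used elsewhere in this
# file.
def _example_cleanup_cutoff():
    return (datetime.datetime.now() - datetime.timedelta(hours=1)).strftime(DF)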
class survey_user_input_line(osv.Model):
_name = 'survey.user_input_line'
_description = 'Survey User Input Line'
_rec_name = 'date_create'
def _answered_or_skipped(self, cr, uid, ids, context=None):
for uil in self.browse(cr, uid, ids, context=context):
# 'bool()' is required in order to make '!=' act as XOR with objects
return uil.skipped != bool(uil.answer_type)
def _check_answer_type(self, cr, uid, ids, context=None):
for uil in self.browse(cr, uid, ids, context=None):
if uil.answer_type:
if uil.answer_type == 'text':
# 'bool()' is required in order to make '!=' act as XOR with objects
return bool(uil.value_text)
elif uil.answer_type == 'number':
return (uil.value_number == 0) or (uil.value_number != False)
elif uil.answer_type == 'date':
return bool(uil.value_date)
elif uil.answer_type == 'free_text':
return bool(uil.value_free_text)
elif uil.answer_type == 'suggestion':
return bool(uil.value_suggested)
return True
_columns = {
'user_input_id': fields.many2one('survey.user_input', 'User Input',
ondelete='cascade', required=1),
'question_id': fields.many2one('survey.question', 'Question',
ondelete='restrict', required=1),
'page_id': fields.related('question_id', 'page_id', type='many2one',
relation='survey.page', string="Page"),
'survey_id': fields.related('user_input_id', 'survey_id',
type="many2one", relation="survey.survey",
string='Survey', store=True),
'date_create': fields.datetime('Create Date', required=1),
'skipped': fields.boolean('Skipped'),
'answer_type': fields.selection([('text', 'Text'),
('number', 'Number'),
('date', 'Date'),
('free_text', 'Free Text'),
('suggestion', 'Suggestion')],
'Answer Type'),
'value_text': fields.char("Text answer"),
'value_number': fields.float("Numerical answer"),
'value_date': fields.datetime("Date answer"),
'value_free_text': fields.text("Free Text answer"),
'value_suggested': fields.many2one('survey.label', "Suggested answer"),
'value_suggested_row': fields.many2one('survey.label', "Row answer"),
'quizz_mark': fields.float("Score given for this answer")
}
_defaults = {
'skipped': False,
        'date_create': fields.datetime.now,  # pass the callable (as for survey.user_input above) so the default is evaluated at create time
}
_constraints = [
(_answered_or_skipped, "A question cannot be unanswered and skipped", ['skipped', 'answer_type']),
(_check_answer_type, "The answer must be in the right type", ['answer_type', 'text', 'number', 'date', 'free_text', 'suggestion'])
]
def __get_mark(self, cr, uid, value_suggested, context=None):
try:
mark = self.pool.get('survey.label').browse(cr, uid, int(value_suggested), context=context).quizz_mark
except AttributeError:
mark = 0.0
except KeyError:
mark = 0.0
except ValueError:
mark = 0.0
return mark
def create(self, cr, uid, vals, context=None):
value_suggested = vals.get('value_suggested')
if value_suggested:
vals.update({'quizz_mark': self.__get_mark(cr, uid, value_suggested)})
return super(survey_user_input_line, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
value_suggested = vals.get('value_suggested')
if value_suggested:
vals.update({'quizz_mark': self.__get_mark(cr, uid, value_suggested)})
return super(survey_user_input_line, self).write(cr, uid, ids, vals, context=context)
def save_lines(self, cr, uid, user_input_id, question, post, answer_tag,
context=None):
''' Save answers to questions, depending on question type
If an answer already exists for question and user_input_id, it will be
overwritten (in order to maintain data consistency). '''
try:
saver = getattr(self, 'save_line_' + question.type)
except AttributeError:
_logger.error(question.type + ": This type of question has no saving function")
return False
else:
saver(cr, uid, user_input_id, question, post, answer_tag, context=context)
def save_line_free_text(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False,
}
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'free_text', 'value_free_text': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.write(cr, uid, old_uil[0], vals, context=context)
else:
self.create(cr, uid, vals, context=context)
return True
def save_line_textbox(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'text', 'value_text': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.write(cr, uid, old_uil[0], vals, context=context)
else:
self.create(cr, uid, vals, context=context)
return True
def save_line_numerical_box(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'number', 'value_number': float(post[answer_tag])})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.write(cr, uid, old_uil[0], vals, context=context)
else:
self.create(cr, uid, vals, context=context)
return True
def save_line_datetime(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'date', 'value_date': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.write(cr, uid, old_uil[0], vals, context=context)
else:
self.create(cr, uid, vals, context=context)
return True
def save_line_simple_choice(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.unlink(cr, SUPERUSER_ID, old_uil, context=context)
if answer_tag in post and post[answer_tag].strip() != '':
vals.update({'answer_type': 'suggestion', 'value_suggested': post[answer_tag]})
else:
vals.update({'answer_type': None, 'skipped': True})
        # '-1' indicates 'comment counts as an answer', so it does not need to be recorded
if post.get(answer_tag) and post.get(answer_tag) != '-1':
self.create(cr, uid, vals, context=context)
comment_answer = post.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
if comment_answer:
vals.update({'answer_type': 'text', 'value_text': comment_answer, 'skipped': False, 'value_suggested': False})
self.create(cr, uid, vals, context=context)
return True
def save_line_multiple_choice(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.unlink(cr, SUPERUSER_ID, old_uil, context=context)
ca = dict_keys_startswith(post, answer_tag+"_")
comment_answer = ca.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
if len(ca) > 0:
for a in ca:
                # '-1' indicates 'comment counts as an answer', so it does not need to be recorded
if a != ('%s_%s' % (answer_tag, '-1')):
vals.update({'answer_type': 'suggestion', 'value_suggested': ca[a]})
self.create(cr, uid, vals, context=context)
if comment_answer:
vals.update({'answer_type': 'text', 'value_text': comment_answer, 'value_suggested': False})
self.create(cr, uid, vals, context=context)
if not ca and not comment_answer:
vals.update({'answer_type': None, 'skipped': True})
self.create(cr, uid, vals, context=context)
return True
def save_line_matrix(self, cr, uid, user_input_id, question, post, answer_tag, context=None):
vals = {
'user_input_id': user_input_id,
'question_id': question.id,
'page_id': question.page_id.id,
'survey_id': question.survey_id.id,
'skipped': False
}
old_uil = self.search(cr, uid, [('user_input_id', '=', user_input_id),
('survey_id', '=', question.survey_id.id),
('question_id', '=', question.id)],
context=context)
if old_uil:
self.unlink(cr, SUPERUSER_ID, old_uil, context=context)
no_answers = True
ca = dict_keys_startswith(post, answer_tag+"_")
comment_answer = ca.pop(("%s_%s" % (answer_tag, 'comment')), '').strip()
if comment_answer:
vals.update({'answer_type': 'text', 'value_text': comment_answer})
self.create(cr, uid, vals, context=context)
no_answers = False
if question.matrix_subtype == 'simple':
for row in question.labels_ids_2:
a_tag = "%s_%s" % (answer_tag, row.id)
if a_tag in ca:
no_answers = False
vals.update({'answer_type': 'suggestion', 'value_suggested': ca[a_tag], 'value_suggested_row': row.id})
self.create(cr, uid, vals, context=context)
elif question.matrix_subtype == 'multiple':
for col in question.labels_ids:
for row in question.labels_ids_2:
a_tag = "%s_%s_%s" % (answer_tag, row.id, col.id)
if a_tag in ca:
no_answers = False
vals.update({'answer_type': 'suggestion', 'value_suggested': col.id, 'value_suggested_row': row.id})
self.create(cr, uid, vals, context=context)
if no_answers:
vals.update({'answer_type': None, 'skipped': True})
self.create(cr, uid, vals, context=context)
return True
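# Illustrative sketch (not part of the original module): validate_question and save_lines
# both dispatch on the question type by name, e.g. a 'textbox' question is routed to
# save_line_textbox via getattr. The lookup below returns None when a type has no handler.
def _example_dispatch(handler_owner, question_type='textbox'):
    return getattr(handler_owner, 'save_line_' + question_type, None)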
def dict_keys_startswith(dictionary, string):
    '''Returns a dictionary containing the elements of <dictionary> whose keys
    start with <string>.
.. note::
This function uses dictionary comprehensions (Python >= 2.7)'''
return {k: dictionary[k] for k in filter(lambda key: key.startswith(string), dictionary.keys())}
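# Illustrative sketch (not part of the original module): dict_keys_startswith keeps only
# the entries whose key starts with the given prefix. The post data below is hypothetical.
def _example_dict_keys_startswith():
    post = {'3_2_1': '5', '3_2_1_comment': 'note', 'csrf_token': 'x'}
    return dict_keys_startswith(post, '3_2_1')  # drops 'csrf_token'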
| agpl-3.0 |
pcodes/mhn_interface | mhn_api/stats.py | 1 | 1127 | import requests
from django.conf import settings
def get_request(params):
if settings.DEBUG:
api_key = {'api_key': settings.MHN_DEV_KEY}
else:
api_key = {'api_key': settings.MHN_API_KEY}
base_url = settings.MHN_URL
if not params:
request = api_key
else:
request = {**api_key, **params}
response = requests.get(base_url, params=request).json()
return response
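# Illustrative sketch (not part of the original module): get_request merges the API key
# with any extra query parameters before hitting the MHN endpoint. Values are hypothetical.
def _example_merge_params():
    api_key = {'api_key': 'abc123'}
    params = {'hours_ago': '24'}
    return {**api_key, **params}  # {'api_key': 'abc123', 'hours_ago': '24'}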
def get_past_time_attacks(time):
time_frame = {'hours_ago' : str(time)}
past_attacks = get_request(time_frame)
return len(past_attacks['data'])
def get_user_time_attacks(time, honey_id):
time_frame = {'hours_ago' : str(time), 'identifier': honey_id}
past_attacks = get_request(time_frame)
return len(past_attacks['data'])
def get_user_attacks(honeypot_id):
id_param = {'identifier': honeypot_id}
user_attacks = get_request(id_param)
output = reverse_list(user_attacks['data'])
return output
def reverse_list(input_list):
output = []
for x in range(0, len(input_list)):
output.append(input_list[len(input_list) - 1 - x])
return output
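# Illustrative note (not part of the original module): reverse_list returns the input in
# reverse order, equivalent to slicing with [::-1].
def _example_reverse_list():
    assert reverse_list([1, 2, 3]) == [3, 2, 1]
    return [1, 2, 3][::-1]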
| mit |
reidwooten99/botbot-web | botbot/apps/accounts/forms.py | 2 | 1038 | from django import forms
from . import models
class AccountForm(forms.ModelForm):
class Meta:
model = models.User
exclude = ('first_name', 'last_name', 'password', 'is_staff',
'is_active', 'is_superuser', 'last_login', 'date_joined',
'groups', 'user_permissions', 'email')
class TimezoneForm(forms.Form):
CHOICES = [('', '')]
CHOICES.extend(models.TIMEZONE_CHOICES)
timezone = forms.ChoiceField(choices=CHOICES, required=False)
def __init__(self, request, *args, **kwargs):
super(TimezoneForm, self).__init__(*args, **kwargs)
self.request = request
self.fields['timezone'].initial = request.session.get('django_timezone',
"")
def save(self):
tz = self.cleaned_data['timezone']
self.request.session['django_timezone'] = tz
if self.request.user.is_authenticated():
self.request.user.timezone = tz
self.request.user.save()
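# Illustrative sketch (not part of the original module): a hypothetical view using
# TimezoneForm - bind it to the POST data, validate, then save() to store the timezone in
# the session (and on the user when authenticated).
def _example_set_timezone_view(request):
    form = TimezoneForm(request, request.POST)
    if form.is_valid():
        form.save()
    return form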
| mit |
abhinavp13/IITBX-edx-platform-dev | common/djangoapps/student/tests/test_email.py | 14 | 11282 | import json
import django.db
from student.tests.factories import UserFactory, RegistrationFactory, PendingEmailChangeFactory
from student.views import reactivation_email_for_user, change_email_request, confirm_email_change
from student.models import UserProfile, PendingEmailChange
from django.contrib.auth.models import User
from django.test import TestCase, TransactionTestCase
from django.test.client import RequestFactory
from mock import Mock, patch
from django.http import Http404, HttpResponse
from django.conf import settings
from nose.plugins.skip import SkipTest
class TestException(Exception):
"""Exception used for testing that nothing will catch explicitly"""
pass
def mock_render_to_string(template_name, context):
"""Return a string that encodes template_name and context"""
return str((template_name, sorted(context.iteritems())))
def mock_render_to_response(template_name, context):
"""Return an HttpResponse with content that encodes template_name and context"""
return HttpResponse(mock_render_to_string(template_name, context))
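# Illustrative sketch (not part of the original tests): mock_render_to_string encodes the
# template name and the sorted context items into a plain string, which lets the tests
# compare rendered content without real templates. The values below are hypothetical.
def _example_mock_render():
    return mock_render_to_string('emails/example.txt', {'name': 'Jane', 'key': 'abc'})
    # -> "('emails/example.txt', [('key', 'abc'), ('name', 'Jane')])"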
class EmailTestMixin(object):
"""Adds useful assertions for testing `email_user`"""
def assertEmailUser(self, email_user, subject_template, subject_context, body_template, body_context):
"""Assert that `email_user` was used to send and email with the supplied subject and body
`email_user`: The mock `django.contrib.auth.models.User.email_user` function
to verify
`subject_template`: The template to have been used for the subject
`subject_context`: The context to have been used for the subject
`body_template`: The template to have been used for the body
`body_context`: The context to have been used for the body
"""
email_user.assert_called_with(
mock_render_to_string(subject_template, subject_context),
mock_render_to_string(body_template, body_context),
settings.DEFAULT_FROM_EMAIL
)
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
@patch('django.contrib.auth.models.User.email_user')
class ReactivationEmailTests(EmailTestMixin, TestCase):
"""Test sending a reactivation email to a user"""
def setUp(self):
self.user = UserFactory.create()
self.unregisteredUser = UserFactory.create()
self.registration = RegistrationFactory.create(user=self.user)
def reactivation_email(self, user):
"""
Send the reactivation email to the specified user,
and return the response as json data.
"""
return json.loads(reactivation_email_for_user(user).content)
def assertReactivateEmailSent(self, email_user):
"""Assert that the correct reactivation email has been sent"""
context = {
'name': self.user.profile.name,
'key': self.registration.activation_key
}
self.assertEmailUser(
email_user,
'emails/activation_email_subject.txt',
context,
'emails/activation_email.txt',
context
)
def test_reactivation_email_failure(self, email_user):
self.user.email_user.side_effect = Exception
response_data = self.reactivation_email(self.user)
self.assertReactivateEmailSent(email_user)
self.assertFalse(response_data['success'])
def test_reactivation_for_unregistered_user(self, email_user):
"""
Test that trying to send a reactivation email to an unregistered
user fails without throwing a 500 error.
"""
response_data = self.reactivation_email(self.unregisteredUser)
self.assertFalse(response_data['success'])
def test_reactivation_email_success(self, email_user):
response_data = self.reactivation_email(self.user)
self.assertReactivateEmailSent(email_user)
self.assertTrue(response_data['success'])
class EmailChangeRequestTests(TestCase):
"""Test changing a user's email address"""
def setUp(self):
self.user = UserFactory.create()
self.new_email = '[email protected]'
self.req_factory = RequestFactory()
self.request = self.req_factory.post('unused_url', data={
'password': 'test',
'new_email': self.new_email
})
self.request.user = self.user
self.user.email_user = Mock()
def run_request(self, request=None):
"""Execute request and return result parsed as json
If request isn't passed in, use self.request instead
"""
if request is None:
request = self.request
        response = change_email_request(request)
return json.loads(response.content)
def assertFailedRequest(self, response_data, expected_error):
"""Assert that `response_data` indicates a failed request that returns `expected_error`"""
self.assertFalse(response_data['success'])
self.assertEquals(expected_error, response_data['error'])
self.assertFalse(self.user.email_user.called)
def test_unauthenticated(self):
self.user.is_authenticated = False
with self.assertRaises(Http404):
change_email_request(self.request)
self.assertFalse(self.user.email_user.called)
def test_invalid_password(self):
self.request.POST['password'] = 'wrong'
self.assertFailedRequest(self.run_request(), 'Invalid password')
def test_invalid_emails(self):
for email in ('bad_email', 'bad_email@', '@bad_email'):
self.request.POST['new_email'] = email
self.assertFailedRequest(self.run_request(), 'Valid e-mail address required.')
def check_duplicate_email(self, email):
"""Test that a request to change a users email to `email` fails"""
request = self.req_factory.post('unused_url', data={
'new_email': email,
'password': 'test',
})
request.user = self.user
self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')
def test_duplicate_email(self):
UserFactory.create(email=self.new_email)
self.check_duplicate_email(self.new_email)
def test_capitalized_duplicate_email(self):
"""Test that we check for email addresses in a case insensitive way"""
UserFactory.create(email=self.new_email)
self.check_duplicate_email(self.new_email.capitalize())
# TODO: Finish testing the rest of change_email_request
@patch('django.contrib.auth.models.User.email_user')
@patch('student.views.render_to_response', Mock(side_effect=mock_render_to_response, autospec=True))
@patch('student.views.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
class EmailChangeConfirmationTests(EmailTestMixin, TransactionTestCase):
"""Test that confirmation of email change requests function even in the face of exceptions thrown while sending email"""
def setUp(self):
self.user = UserFactory.create()
self.profile = UserProfile.objects.get(user=self.user)
self.req_factory = RequestFactory()
self.request = self.req_factory.get('unused_url')
self.request.user = self.user
self.user.email_user = Mock()
self.pending_change_request = PendingEmailChangeFactory.create(user=self.user)
self.key = self.pending_change_request.activation_key
def assertRolledBack(self):
"""Assert that no changes to user, profile, or pending email have been made to the db"""
self.assertEquals(self.user.email, User.objects.get(username=self.user.username).email)
self.assertEquals(self.profile.meta, UserProfile.objects.get(user=self.user).meta)
self.assertEquals(1, PendingEmailChange.objects.count())
def assertFailedBeforeEmailing(self, email_user):
"""Assert that the function failed before emailing a user"""
self.assertRolledBack()
self.assertFalse(email_user.called)
def check_confirm_email_change(self, expected_template, expected_context):
"""Call `confirm_email_change` and assert that the content was generated as expected
`expected_template`: The name of the template that should have been used
to generate the content
`expected_context`: The context dictionary that should have been used to
generate the content
"""
response = confirm_email_change(self.request, self.key)
self.assertEquals(
mock_render_to_response(expected_template, expected_context).content,
response.content
)
def assertChangeEmailSent(self, email_user):
"""Assert that the correct email was sent to confirm an email change"""
context = {
'old_email': self.user.email,
'new_email': self.pending_change_request.new_email,
}
self.assertEmailUser(
email_user,
'emails/email_change_subject.txt',
context,
'emails/confirm_email_change.txt',
context
)
def test_not_pending(self, email_user):
self.key = 'not_a_key'
self.check_confirm_email_change('invalid_email_key.html', {})
self.assertFailedBeforeEmailing(email_user)
def test_duplicate_email(self, email_user):
UserFactory.create(email=self.pending_change_request.new_email)
self.check_confirm_email_change('email_exists.html', {})
self.assertFailedBeforeEmailing(email_user)
def test_old_email_fails(self, email_user):
email_user.side_effect = [Exception, None]
self.check_confirm_email_change('email_change_failed.html', {
'email': self.user.email,
})
self.assertRolledBack()
self.assertChangeEmailSent(email_user)
def test_new_email_fails(self, email_user):
email_user.side_effect = [None, Exception]
self.check_confirm_email_change('email_change_failed.html', {
'email': self.pending_change_request.new_email
})
self.assertRolledBack()
self.assertChangeEmailSent(email_user)
def test_successful_email_change(self, email_user):
self.check_confirm_email_change('email_change_successful.html', {
'old_email': self.user.email,
'new_email': self.pending_change_request.new_email
})
self.assertChangeEmailSent(email_user)
meta = json.loads(UserProfile.objects.get(user=self.user).meta)
self.assertIn('old_emails', meta)
self.assertEquals(self.user.email, meta['old_emails'][0][0])
self.assertEquals(
self.pending_change_request.new_email,
User.objects.get(username=self.user.username).email
)
self.assertEquals(0, PendingEmailChange.objects.count())
@patch('student.views.PendingEmailChange.objects.get', Mock(side_effect=TestException))
@patch('student.views.transaction.rollback', wraps=django.db.transaction.rollback)
def test_always_rollback(self, rollback, _email_user):
with self.assertRaises(TestException):
confirm_email_change(self.request, self.key)
rollback.assert_called_with()
| agpl-3.0 |
pymedusa/SickRage | ext/boto/s3/key.py | 5 | 83034 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = encodebytes(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_storage_class_header(self, resp):
provider = self.bucket.connection.provider
if provider.storage_class_header:
self._storage_class = resp.getheader(
provider.storage_class_header, None)
if (self._storage_class is None and
provider.get_provider_name() == 'aws'):
# S3 docs for HEAD object requests say S3 will return this
# header for all objects except Standard storage class objects.
self._storage_class = 'STANDARD'
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() in Key.base_fields:
self.__dict__[name.lower().replace('-', '_')] = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
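    # Illustrative sketch (not part of the original boto source): the iterator support
    # above makes it possible to stream a key's contents to a local file in BufferSize
    # chunks without loading the whole object into memory.
    def _example_stream_to_file(self, path):
        with open(path, 'wb') as fp:
            for chunk in self:
                fp.write(chunk)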
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
        all metadata and ACL info while changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key,
src_version_id=self.version_id)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
if name.lower() in Key.base_user_settable_fields:
self.__dict__[name.lower().replace('-', '_')] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds.
:type method: string
:param method: The method to use for retrieving the file
(default is GET).
:type headers: dict
:param headers: Any headers to pass along in the request.
:type query_auth: bool
:param query_auth: If True, signs the request in the URL.
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
        :param expires_in_absolute: If True, ``expires_in`` is treated as an
            absolute expiration time (seconds since the epoch) rather than a
            number of seconds from now.
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
        Upload a file to a key in a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
            size parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
# If storage_class is None, then a user has not explicitly requested
# a storage class, so we can assume STANDARD here
if self._storage_class not in [None, 'STANDARD']:
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
        # the auth mechanism (because closures). Detect if it's SigV4 & embellish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
if size is not None:
kwargs['size'] = size
headers['_sha256'] = compute_hash(**kwargs)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
# If you use customer-provided encryption keys, the ETag value that
# Amazon S3 returns in the response will not be the MD5 of the
# object.
server_side_encryption_customer_algorithm = response.getheader(
'x-amz-server-side-encryption-customer-algorithm', None)
if server_side_encryption_customer_algorithm is None:
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
        Content-Length and Content-MD5 headers. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
            been successfully transmitted to S3 and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
# defer md5 calculation to on the fly and
# we don't know anything about size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
                    # compute_md5() also sets self.size to the actual
                    # number of bytes read while computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
                    # If md5 is provided, we still need the size, so
                    # calculate it from the bytes remaining to end of content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
        key in S3 and the string ``string_data`` as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: headers to send when retrieving the files
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
            the second representing the size of the object being
            transferred.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
        Get a torrent file (see get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
            the second representing the size of the object being
            transferred.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
        :type fp: file-like object
        :param fp: the file pointer to which the object's contents will be
            written
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
            the second representing the size of the object being
            transferred.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
            the second representing the size of the object being
            transferred.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted from S3 and
            the second representing the size of the object being
            transferred.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
            account you are granting the permission to.
:type recursive: boolean
        :param recursive: A boolean value that controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
            account you are granting the permission to.
:type display_name: string
        :param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
        if isinstance(metadata, set):
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
| gpl-3.0 |
loriab/qcdb | data/sapt_HBC1.py | 2 | 28020 | DATA = {}
#DATA['SAPT MODELCHEM'] = 'SAPT3FC-SA-atz'
DATA['SAPT MODELCHEM'] = 'SAPT3FC-CP-atz' # LAB 2017 for consistency with saptone mc labels
DATA['SAPT ELST ENERGY'] = {}
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-3.4'] = -64.1477
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-3.5'] = -54.7215
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-3.6'] = -46.3004
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-3.7'] = -38.8446
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-3.8'] = -32.3404
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-3.9'] = -26.7808
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-4.0'] = -22.1358
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-4.1'] = -18.3327
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-4.2'] = -15.2633
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-4.3'] = -12.8012
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-4.4'] = -10.8272
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-4.6'] = -7.9407
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-4.8'] = -6.0030
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-5.0'] = -4.6554
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-5.4'] = -2.9656
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-5.8'] = -1.9884
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-6.4'] = -1.1661
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-7.0'] = -0.7288
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-8.0'] = -0.3720
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaOO-10.0'] = -0.1302
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-3.4'] = -52.1042
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-3.5'] = -50.2892
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-3.6'] = -46.4649
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-3.7'] = -41.7727
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-3.8'] = -36.7899
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-3.9'] = -31.8843
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-4.0'] = -27.3138
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-4.1'] = -23.2359
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-4.2'] = -19.7235
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-4.3'] = -16.7741
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-4.4'] = -14.3370
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-4.6'] = -10.6975
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-4.8'] = -8.2231
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-5.0'] = -6.4954
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-5.4'] = -4.3289
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-5.8'] = -3.0684
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-6.4'] = -1.9699
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-7.0'] = -1.3449
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-8.0'] = -0.7826
DATA['SAPT ELST ENERGY']['HBC1-FaONFaON-10.0'] = -0.3340
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-3.4'] = -72.5427
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-3.5'] = -67.0366
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-3.6'] = -61.1923
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-3.7'] = -55.1763
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-3.8'] = -49.1404
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-3.9'] = -43.2330
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-4.0'] = -37.5930
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-4.1'] = -32.3463
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-4.2'] = -27.5956
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-4.3'] = -23.4032
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-4.4'] = -19.7907
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-4.6'] = -14.1725
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-4.8'] = -10.2838
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-5.0'] = -7.6106
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-5.4'] = -4.4348
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-5.8'] = -2.7691
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-6.4'] = -1.4904
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-7.0'] = -0.8626
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-8.0'] = -0.3883
DATA['SAPT ELST ENERGY']['HBC1-FaNNFaNN-10.0'] = -0.1028
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-3.4'] = -59.4274
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-3.5'] = -53.6298
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-3.6'] = -47.2779
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-3.7'] = -40.9512
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-3.8'] = -34.9819
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-3.9'] = -29.5708
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-4.0'] = -24.8334
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-4.1'] = -20.8123
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-4.2'] = -17.4826
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-4.3'] = -14.7671
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-4.4'] = -12.5654
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-4.6'] = -9.3176
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-4.8'] = -7.1205
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-5.0'] = -5.5872
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-5.4'] = -3.6614
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-5.8'] = -2.5399
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-6.4'] = -1.5724
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-7.0'] = -1.0350
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-8.0'] = -0.5697
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaON-10.0'] = -0.2224
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-3.4'] = -63.0964
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-3.5'] = -59.1529
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-3.6'] = -54.2172
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-3.7'] = -48.7953
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-3.8'] = -43.2240
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-3.9'] = -37.7616
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-4.0'] = -32.6087
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-4.1'] = -27.9150
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-4.2'] = -23.7730
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-4.3'] = -20.2094
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-4.4'] = -17.1998
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-4.6'] = -12.5946
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-4.8'] = -9.4116
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-5.0'] = -7.1953
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-5.4'] = -4.4857
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-5.8'] = -2.9981
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-6.4'] = -1.7882
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-7.0'] = -1.1465
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-8.0'] = -0.6108
DATA['SAPT ELST ENERGY']['HBC1-FaONFaNN-10.0'] = -0.2271
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-3.6'] = -63.7675
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-3.7'] = -53.7870
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-3.8'] = -45.7778
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-3.9'] = -38.8359
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-4.0'] = -32.7947
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-4.1'] = -27.5912
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-4.2'] = -23.1754
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-4.3'] = -19.4816
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-4.4'] = -16.4298
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-4.6'] = -11.8814
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-4.8'] = -8.8297
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-5.0'] = -6.7435
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-5.4'] = -4.2105
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-5.8'] = -2.8088
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-6.4'] = -1.6586
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-7.0'] = -1.0488
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-8.0'] = -0.5447
DATA['SAPT ELST ENERGY']['HBC1-FaOOFaNN-10.0'] = -0.1931
DATA['SAPT EXCH ENERGY'] = {}
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-3.4'] = 112.2776
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-3.5'] = 89.9583
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-3.6'] = 70.8535
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-3.7'] = 54.7344
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-3.8'] = 41.4251
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-3.9'] = 30.7346
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-4.0'] = 22.4114
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-4.1'] = 16.1235
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-4.2'] = 11.4908
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-4.3'] = 8.1404
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-4.4'] = 5.7477
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-4.6'] = 2.8542
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-4.8'] = 1.4167
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-5.0'] = 0.7053
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-5.4'] = 0.1774
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-5.8'] = 0.0452
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-6.4'] = 0.0059
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-7.0'] = 0.0008
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-8.0'] = 0.0000
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaOO-10.0'] = -0.0000
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-3.4'] = 89.9867
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-3.5'] = 83.6895
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-3.6'] = 73.7083
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-3.7'] = 62.4514
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-3.8'] = 51.1783
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-3.9'] = 40.6789
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-4.0'] = 31.4429
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-4.1'] = 23.7155
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-4.2'] = 17.5285
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-4.3'] = 12.7542
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-4.4'] = 9.1762
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-4.6'] = 4.6549
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-4.8'] = 2.3289
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-5.0'] = 1.1596
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-5.4'] = 0.2912
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-5.8'] = 0.0751
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-6.4'] = 0.0102
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-7.0'] = 0.0014
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-8.0'] = 0.0001
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaON-10.0'] = -0.0000
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-3.4'] = 131.4618
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-3.5'] = 118.0754
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-3.6'] = 104.2186
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-3.7'] = 90.3364
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-3.8'] = 76.8267
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-3.9'] = 64.0415
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-4.0'] = 52.2852
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-4.1'] = 41.8041
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-4.2'] = 32.7608
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-4.3'] = 25.2124
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-4.4'] = 19.1055
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-4.6'] = 10.5904
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-4.8'] = 5.6880
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-5.0'] = 3.0028
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-5.4'] = 0.8231
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-5.8'] = 0.2271
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-6.4'] = 0.0336
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-7.0'] = 0.0050
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-8.0'] = 0.0002
DATA['SAPT EXCH ENERGY']['HBC1-FaNNFaNN-10.0'] = 0.0000
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-3.4'] = 102.6104
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-3.5'] = 88.0186
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-3.6'] = 72.9510
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-3.7'] = 58.7118
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-3.8'] = 45.9881
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-3.9'] = 35.1232
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-4.0'] = 26.2315
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-4.1'] = 19.2425
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-4.2'] = 13.9366
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-4.3'] = 10.0113
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-4.4'] = 7.1549
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-4.6'] = 3.6201
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-4.8'] = 1.8159
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-5.0'] = 0.9068
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-5.4'] = 0.2279
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-5.8'] = 0.0585
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-6.4'] = 0.0078
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-7.0'] = 0.0010
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-8.0'] = 0.0000
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaON-10.0'] = -0.0000
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-3.4'] = 111.7221
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-3.5'] = 101.4555
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-3.6'] = 89.3718
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-3.7'] = 76.6649
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-3.8'] = 64.1357
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-3.9'] = 52.3602
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-4.0'] = 41.7570
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-4.1'] = 32.5884
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-4.2'] = 24.9604
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-4.3'] = 18.8277
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-4.4'] = 14.0332
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-4.6'] = 7.5943
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-4.8'] = 4.0088
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-5.0'] = 2.0838
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-5.4'] = 0.5530
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-5.8'] = 0.1479
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-6.4'] = 0.0210
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-7.0'] = 0.0030
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-8.0'] = 0.0001
DATA['SAPT EXCH ENERGY']['HBC1-FaONFaNN-10.0'] = 0.0000
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-3.6'] = 104.2489
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-3.7'] = 81.6304
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-3.8'] = 64.2949
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-3.9'] = 50.0394
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-4.0'] = 38.3316
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-4.1'] = 28.8814
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-4.2'] = 21.4283
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-4.3'] = 15.6923
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-4.4'] = 11.3774
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-4.6'] = 5.8711
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-4.8'] = 2.9967
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-5.0'] = 1.5274
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-5.4'] = 0.4014
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-5.8'] = 0.1076
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-6.4'] = 0.0153
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-7.0'] = 0.0022
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-8.0'] = 0.0001
DATA['SAPT EXCH ENERGY']['HBC1-FaOOFaNN-10.0'] = 0.0000
DATA['SAPT IND ENERGY'] = {}
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-3.4'] = -55.2637
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-3.5'] = -43.1617
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-3.6'] = -33.4137
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-3.7'] = -25.5618
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-3.8'] = -19.3060
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-3.9'] = -14.4152
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-4.0'] = -10.6788
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-4.1'] = -7.8846
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-4.2'] = -5.8265
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-4.3'] = -4.3229
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-4.4'] = -3.2264
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-4.6'] = -1.8363
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-4.8'] = -1.0780
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-5.0'] = -0.6540
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-5.4'] = -0.2655
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-5.8'] = -0.1204
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-6.4'] = -0.0433
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-7.0'] = -0.0183
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-8.0'] = -0.0055
DATA['SAPT IND ENERGY']['HBC1-FaOOFaOO-10.0'] = -0.0008
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-3.4'] = -31.2744
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-3.5'] = -30.7836
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-3.6'] = -27.9320
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-3.7'] = -24.1230
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-3.8'] = -20.0828
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-3.9'] = -16.2364
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-4.0'] = -12.8258
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-4.1'] = -9.9605
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-4.2'] = -7.6518
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-4.3'] = -5.8481
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-4.4'] = -4.4676
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-4.6'] = -2.6360
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-4.8'] = -1.5940
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-5.0'] = -0.9938
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-5.4'] = -0.4283
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-5.8'] = -0.2088
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-6.4'] = -0.0843
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-7.0'] = -0.0396
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-8.0'] = -0.0139
DATA['SAPT IND ENERGY']['HBC1-FaONFaON-10.0'] = -0.0027
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-3.4'] = -52.2044
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-3.5'] = -46.9399
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-3.6'] = -41.3560
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-3.7'] = -35.7351
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-3.8'] = -30.3006
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-3.9'] = -25.2230
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-4.0'] = -20.6276
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-4.1'] = -16.5960
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-4.2'] = -13.1662
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-4.3'] = -10.3326
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-4.4'] = -8.0499
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-4.6'] = -4.8421
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-4.8'] = -2.9227
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-5.0'] = -1.7920
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-5.4'] = -0.7241
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-5.8'] = -0.3265
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-6.4'] = -0.1190
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-7.0'] = -0.0518
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-8.0'] = -0.0168
DATA['SAPT IND ENERGY']['HBC1-FaNNFaNN-10.0'] = -0.0031
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-3.4'] = -44.5465
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-3.5'] = -38.4883
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-3.6'] = -31.9190
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-3.7'] = -25.6899
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-3.8'] = -20.1830
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-3.9'] = -15.5430
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-4.0'] = -11.7883
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-4.1'] = -8.8565
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-4.2'] = -6.6315
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-4.3'] = -4.9732
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-4.4'] = -3.7473
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-4.6'] = -2.1697
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-4.8'] = -1.2933
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-5.0'] = -0.7958
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-5.4'] = -0.3334
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-5.8'] = -0.1570
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-6.4'] = -0.0602
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-7.0'] = -0.0270
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-8.0'] = -0.0089
DATA['SAPT IND ENERGY']['HBC1-FaOOFaON-10.0'] = -0.0016
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-3.4'] = -42.3357
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-3.5'] = -39.2897
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-3.6'] = -34.9837
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-3.7'] = -30.1826
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-3.8'] = -25.3633
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-3.9'] = -20.8317
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-4.0'] = -16.7772
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-4.1'] = -13.2987
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-4.2'] = -10.4203
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-4.3'] = -8.1071
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-4.4'] = -6.2868
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-4.6'] = -3.7822
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-4.8'] = -2.2998
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-5.0'] = -1.4256
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-5.4'] = -0.5918
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-5.8'] = -0.2753
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-6.4'] = -0.1049
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-7.0'] = -0.0473
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-8.0'] = -0.0159
DATA['SAPT IND ENERGY']['HBC1-FaONFaNN-10.0'] = -0.0030
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-3.6'] = -54.2605
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-3.7'] = -39.9282
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-3.8'] = -30.5018
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-3.9'] = -23.3858
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-4.0'] = -17.8612
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-4.1'] = -13.5642
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-4.2'] = -10.2495
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-4.3'] = -7.7236
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-4.4'] = -5.8211
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-4.6'] = -3.3409
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-4.8'] = -1.9641
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-5.0'] = -1.1895
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-5.4'] = -0.4787
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-5.8'] = -0.2158
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-6.4'] = -0.0778
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-7.0'] = -0.0332
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-8.0'] = -0.0104
DATA['SAPT IND ENERGY']['HBC1-FaOOFaNN-10.0'] = -0.0018
DATA['SAPT DISP ENERGY'] = {}
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-3.4'] = -20.1787
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-3.5'] = -17.2848
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-3.6'] = -14.7279
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-3.7'] = -12.4633
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-3.8'] = -10.4655
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-3.9'] = -8.7228
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-4.0'] = -7.2269
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-4.1'] = -5.9643
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-4.2'] = -4.9138
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-4.3'] = -4.0493
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-4.4'] = -3.3407
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-4.6'] = -2.2899
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-4.8'] = -1.5884
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-5.0'] = -1.1175
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-5.4'] = -0.5804
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-5.8'] = -0.3214
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-6.4'] = -0.1474
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-7.0'] = -0.0751
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-8.0'] = -0.0288
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaOO-10.0'] = -0.0063
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-3.4'] = -17.2503
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-3.5'] = -16.3885
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-3.6'] = -15.0474
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-3.7'] = -13.4999
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-3.8'] = -11.8902
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-3.9'] = -10.3082
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-4.0'] = -8.8206
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-4.1'] = -7.4651
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-4.2'] = -6.2676
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-4.3'] = -5.2349
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-4.4'] = -4.3599
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-4.6'] = -3.0193
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-4.8'] = -2.1006
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-5.0'] = -1.4756
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-5.4'] = -0.7610
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-5.8'] = -0.4192
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-6.4'] = -0.1916
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-7.0'] = -0.0975
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-8.0'] = -0.0374
DATA['SAPT DISP ENERGY']['HBC1-FaONFaON-10.0'] = -0.0082
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-3.4'] = -24.4145
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-3.5'] = -22.4062
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-3.6'] = -20.3457
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-3.7'] = -18.2904
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-3.8'] = -16.2769
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-3.9'] = -14.3466
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-4.0'] = -12.5294
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-4.1'] = -10.8451
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-4.2'] = -9.3148
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-4.3'] = -7.9450
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-4.4'] = -6.7425
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-4.6'] = -4.8077
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-4.8'] = -3.4088
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-5.0'] = -2.4203
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-5.4'] = -1.2493
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-5.8'] = -0.6780
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-6.4'] = -0.3008
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-7.0'] = -0.1487
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-8.0'] = -0.0552
DATA['SAPT DISP ENERGY']['HBC1-FaNNFaNN-10.0'] = -0.0117
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-3.4'] = -18.8839
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-3.5'] = -16.9853
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-3.6'] = -14.9748
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-3.7'] = -13.0020
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-3.8'] = -11.1457
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-3.9'] = -9.4466
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-4.0'] = -7.9327
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-4.1'] = -6.6160
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-4.2'] = -5.4966
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-4.3'] = -4.5596
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-4.4'] = -3.7828
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-4.6'] = -2.6135
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-4.8'] = -1.8202
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-5.0'] = -1.2814
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-5.4'] = -0.6638
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-5.8'] = -0.3669
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-6.4'] = -0.1681
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-7.0'] = -0.0855
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-8.0'] = -0.0328
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaON-10.0'] = -0.0072
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-3.4'] = -20.9001
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-3.5'] = -19.4373
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-3.6'] = -17.7227
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-3.7'] = -15.9090
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-3.8'] = -14.0854
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-3.9'] = -12.3193
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-4.0'] = -10.6570
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-4.1'] = -9.1325
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-4.2'] = -7.7677
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-4.3'] = -6.5682
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-4.4'] = -5.5332
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-4.6'] = -3.9039
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-4.8'] = -2.7494
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-5.0'] = -1.9435
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-5.4'] = -0.9990
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-5.8'] = -0.5431
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-6.4'] = -0.2430
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-7.0'] = -0.1213
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-8.0'] = -0.0456
DATA['SAPT DISP ENERGY']['HBC1-FaONFaNN-10.0'] = -0.0098
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-3.6'] = -20.2662
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-3.7'] = -16.8268
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-3.8'] = -14.2247
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-3.9'] = -12.0323
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-4.0'] = -10.1428
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-4.1'] = -8.5128
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-4.2'] = -7.1158
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-4.3'] = -5.9283
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-4.4'] = -4.9304
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-4.6'] = -3.4097
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-4.8'] = -2.3712
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-5.0'] = -1.6657
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-5.4'] = -0.8562
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-5.8'] = -0.4678
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-6.4'] = -0.2105
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-7.0'] = -0.1055
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-8.0'] = -0.0398
DATA['SAPT DISP ENERGY']['HBC1-FaOOFaNN-10.0'] = -0.0085
| lgpl-3.0 |
cloudfoundry/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/test/test_complex_args.py | 136 | 3507 |
import unittest
from test import test_support
import textwrap
class ComplexArgsTestCase(unittest.TestCase):
def check(self, func, expected, *args):
self.assertEqual(func(*args), expected)
# These functions are tested below as lambdas too. If you add a
# function test, also add a similar lambda test.
# Functions are wrapped in "exec" statements in order to
# silence Py3k warnings.
def test_func_parens_no_unpacking(self):
exec textwrap.dedent("""
def f(((((x))))): return x
self.check(f, 1, 1)
# Inner parens are elided, same as: f(x,)
def f(((x)),): return x
self.check(f, 2, 2)
""")
def test_func_1(self):
exec textwrap.dedent("""
def f(((((x),)))): return x
self.check(f, 3, (3,))
def f(((((x)),))): return x
self.check(f, 4, (4,))
def f(((((x))),)): return x
self.check(f, 5, (5,))
def f(((x),)): return x
self.check(f, 6, (6,))
""")
def test_func_2(self):
exec textwrap.dedent("""
def f(((((x)),),)): return x
self.check(f, 2, ((2,),))
""")
def test_func_3(self):
exec textwrap.dedent("""
def f((((((x)),),),)): return x
self.check(f, 3, (((3,),),))
""")
def test_func_complex(self):
exec textwrap.dedent("""
def f((((((x)),),),), a, b, c): return x, a, b, c
self.check(f, (3, 9, 8, 7), (((3,),),), 9, 8, 7)
def f(((((((x)),)),),), a, b, c): return x, a, b, c
self.check(f, (3, 9, 8, 7), (((3,),),), 9, 8, 7)
def f(a, b, c, ((((((x)),)),),)): return a, b, c, x
self.check(f, (9, 8, 7, 3), 9, 8, 7, (((3,),),))
""")
# Duplicate the tests above, but for lambda. If you add a lambda test,
# also add a similar function test above.
def test_lambda_parens_no_unpacking(self):
exec textwrap.dedent("""
f = lambda (((((x))))): x
self.check(f, 1, 1)
# Inner parens are elided, same as: f(x,)
f = lambda ((x)),: x
self.check(f, 2, 2)
""")
def test_lambda_1(self):
exec textwrap.dedent("""
f = lambda (((((x),)))): x
self.check(f, 3, (3,))
f = lambda (((((x)),))): x
self.check(f, 4, (4,))
f = lambda (((((x))),)): x
self.check(f, 5, (5,))
f = lambda (((x),)): x
self.check(f, 6, (6,))
""")
def test_lambda_2(self):
exec textwrap.dedent("""
f = lambda (((((x)),),)): x
self.check(f, 2, ((2,),))
""")
def test_lambda_3(self):
exec textwrap.dedent("""
f = lambda ((((((x)),),),)): x
self.check(f, 3, (((3,),),))
""")
def test_lambda_complex(self):
exec textwrap.dedent("""
f = lambda (((((x)),),),), a, b, c: (x, a, b, c)
self.check(f, (3, 9, 8, 7), (((3,),),), 9, 8, 7)
f = lambda ((((((x)),)),),), a, b, c: (x, a, b, c)
self.check(f, (3, 9, 8, 7), (((3,),),), 9, 8, 7)
f = lambda a, b, c, ((((((x)),)),),): (a, b, c, x)
self.check(f, (9, 8, 7, 3), 9, 8, 7, (((3,),),))
""")
def test_main():
with test_support.check_py3k_warnings(
("tuple parameter unpacking has been removed", SyntaxWarning),
("parenthesized argument names are invalid", SyntaxWarning)):
test_support.run_unittest(ComplexArgsTestCase)
if __name__ == "__main__":
test_main()
| mit |
wilsonxiao/machinekit | lib/python/popupkeyboard.py | 13 | 10233 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright: 2013
# Author: Dewey Garrett <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#------------------------------------------------------------------------------
"""
# popup keyboard for use with touchscreen applications
# used by pyngcgui.py
# based on work of John Thornton's Buglump
Optional __init__() args:
glade_file (default = 'popupkeyboard.ui')
dialog_name (default = 'dialog') Main window
num_entry_name (default = 'num_entry') Entry for display
coord_buttons_name (default = 'coords') Box for coord buttons
use_coord_buttons (default = True) Enable coord buttons
Required objects for glade_file:
gtk.Window (Main window)
gtk.Entry (Entry for display)
Optional objects for glade_file:
gtk.*Box: (Box for coord buttons)
All buttons use a single handler named 'on_click'.
The PopupKeyboard class recognizes buttons by their LABEL.
Required button LABELS (case insensitive):
0,1,2,3,4,5,6,7,8,9,. (numbers and decimal point)
+/- (toggle sign)
clear (clear)
bs (back space)
save (save)
cancel (cancel)
Optional button LABELS (case insensitive):
x,y,z,a,b,c,u,v,w,d (9 axes plus d for diameter)
"""
import linuxcnc
import sys
import os
import pango
g_ui_dir = linuxcnc.SHARE + "/linuxcnc"
try:
import pygtk
pygtk.require('2.0')
except:
pass
try:
import gtk
except ImportError,msg:
print('GTK not available: %s' % msg)
sys.exit(1)
class PopupKeyboard:
def __init__(self
,glade_file=None
,dialog_name='dialog'
,num_entry_name='num_entry'
,coord_buttons_name='coords'
,use_coord_buttons=True
,theme_name=None
):
if (glade_file is None):
glade_file = os.path.join(g_ui_dir,'popupkeyboard.ui')
fontname ='sans 12 bold'
self.use_coord_buttons = use_coord_buttons
try:
import gtk.glade
except ImportError,detail:
print 'ImportError:',detail
except Exception,msg:
print 'Exception:',Exception
print sys.exc_info()
sys.exit(1)
self.builder = gtk.Builder()
self.builder.add_from_file(glade_file)
self.builder.connect_signals(self)
self.dialog = self.builder.get_object(dialog_name)
self.set_theme(theme_name)
self.num_entry = self.builder.get_object(num_entry_name)
try:
self.coord_buttons = self.builder.get_object(coord_buttons_name)
except:
self.coord_buttons = None
self.result = None
self.location = None
self.top = self.dialog.get_toplevel()
self.top.set_title(glade_file)
self.top.set_keep_above(True)
if (not self.use_coord_buttons):
if self.coord_buttons:
self.coord_buttons.hide()
# prevent closing of dialog by window manager, escape key, etc.
# http://faq.pygtk.org/index.py?file=faq10.013.htp&req=show
self.top.connect("response", self.on_response) #reqd
self.top.connect("delete-event",self.on_delete) #reqd
self.top.connect("close", self.on_close) #probably not reqd
# find buttons with labels XYZABCUVW or D
# and show iff corresponding axis is in axis_mask
label_to_btn = {}
for btn in self.builder.get_objects():
if type(btn) is not gtk.Button:
continue
label_to_btn[btn.get_label().upper()] = btn
if isinstance(btn.child, gtk.Label):
lbl = btn.child
lbl.modify_font(pango.FontDescription(fontname))
try:
self.stat = linuxcnc.stat()
self.stat.poll()
has_x = False
for axno in range(0,9):
axname = 'XYZABCUVW'[axno]
if label_to_btn.has_key(axname):
b = label_to_btn[axname]
if bool(self.stat.axis_mask & (1 << axno)):
b.show()
if axno == 0: has_x = True
else:
b.hide()
bdiam = None
if label_to_btn.has_key('D'):
bdiam = label_to_btn['D']
if bdiam and has_x:
bdiam.show()
elif bdiam:
bdiam.hide()
except linuxcnc.error,msg:
self.stat = None
if self.coord_buttons is not None:
self.coord_buttons.hide()
print "linuxcnc must be running to use axis keys"
# continue without buttons for testing when linuxcnc not running
except Exception, err:
print 'Exception:',Exception
print sys.exc_info()
sys.exit(1)
# making it insensitive clears the initial selection region
self.num_entry.set_state(gtk.STATE_INSENSITIVE)
self.num_entry.modify_text(gtk.STATE_INSENSITIVE
,gtk.gdk.color_parse('black'))
self.num_entry.modify_font(pango.FontDescription(fontname))
def set_theme(self,tname=None):
if tname is None:
return
screen = self.dialog.get_screen()
settings = gtk.settings_get_for_screen(screen)
settings.set_string_property('gtk-theme-name',tname,"")
theme = settings.get_property('gtk-theme-name')
def on_response(self,widget,response):
if response < 0:
widget.emit_stop_by_name('response')
return True
def on_delete(self,widget,event): return True
def on_close(self,widget,event): return True
def run(self,initial_value='',title=None):
if title is not None:
self.top.set_title(title)
self.num_entry.set_text(str(initial_value))
if self.location:
self.dialog.parse_geometry('+%d+%d'
% (self.location[0],self.location[1]))
self.num_entry.set_position(0)
self.dialog.run()
if self.result is None:
return False #user canceled
else:
return True
def on_click(self, widget, data=None):
l = widget.get_label()
e = self.num_entry
self.label_to_method(l)(e,l)
def label_to_method(self,l):
ll = l.lower()
if ll.find('clear') >= 0: return self.do_clear
if ll.find('save') >= 0: return self.do_save
if ll.find('cancel')>= 0: return self.do_cancel
if ll.find('+/-') >= 0: return self.do_sign
if ll.find('bs') >= 0: return self.do_backspace
if ll in ('.0123456789'): return self.do_number
if ll in ('xyzabcuvwd'): return self.do_axis_letter
return self.do_unknown_label
def do_unknown_label(self,e,l):
print 'PopupKeyboard:do_unknown_label: <%s>' % l
def do_number(self,e,l):
# not needed if INSENSITIVE:
# if e.get_selection_bounds(): e.delete_selection()
e.set_text(e.get_text() + l)
def do_backspace(self,e,l):
e.set_text(e.get_text()[:-1])
def do_sign(self,e,l):
current = e.get_text()
if current == '':
current = '-'
elif current[0] == '-':
current = current[1:]
else:
current = '-' + current
e.set_text(current)
def do_save(self,e,l):
self.result = e.get_text()
e.set_text('')
self.location = self.dialog.get_position()
self.dialog.hide() # tells dialog.run() to finish
def do_cancel(self,e,l):
self.result = None # None means canceled
self.dialog.hide() # tells dialog.run() to finish
def do_clear(self,e,l):
e.set_text('')
def do_axis_letter(self,e,l):
if self.stat:
self.stat.poll()
e.set_text("%.6g" % self.coord_value(l))
else:
print "linuxcnc must be running to use <%s> key" % l
def get_result(self):
return(self.result)
def coord_value(self,char):
# offset calc copied from emc_interface.py
# char = 'x' | 'y' | ...
# 'd' is for diameter
s = self.stat
s.poll()
p = s.position # tuple=(xvalue, yvalue, ...
return {
'x': p[0] - s.g5x_offset[0] - s.tool_offset[0],
'y': p[1] - s.g5x_offset[1] - s.tool_offset[1],
'z': p[2] - s.g5x_offset[2] - s.tool_offset[2],
'a': p[3] - s.g5x_offset[3] - s.tool_offset[3],
'b': p[4] - s.g5x_offset[4] - s.tool_offset[4],
'c': p[5] - s.g5x_offset[5] - s.tool_offset[5],
'u': p[6] - s.g5x_offset[6] - s.tool_offset[6],
'v': p[7] - s.g5x_offset[7] - s.tool_offset[7],
'w': p[8] - s.g5x_offset[8] - s.tool_offset[8],
'd':(p[0] - s.g5x_offset[0] - s.tool_offset[0])* 2,#2*R
}[char.lower()]
if __name__ == "__main__":
m = PopupKeyboard()
print "\nClear and Save to end test\n"
ct = 100
while True:
m.run(initial_value=''
,title=str(ct)
)
result = m.get_result()
print 'result = <%s>' % result
if result=='':
sys.exit(0)
ct += 1
gtk.main()
# vim: sts=4 sw=4 et
| gpl-3.0 |
nsfmc/pygments | scripts/check_sources.py | 6 | 7467 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Checker for file headers
~~~~~~~~~~~~~~~~~~~~~~~~
Make sure each Python file has a correct file header
including copyright and license information.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys, os, re
import getopt
import cStringIO
from os.path import join, splitext, abspath
checkers = {}
def checker(*suffixes, **kwds):
only_pkg = kwds.pop('only_pkg', False)
def deco(func):
for suffix in suffixes:
checkers.setdefault(suffix, []).append(func)
func.only_pkg = only_pkg
return func
return deco
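# Illustrative sketch, not part of the original script: a hypothetical extra
# checker registered the same way as the real ones below. A registered checker
# receives the filename and the list of lines and yields (lineno, message) pairs.
#
#   @checker('.rst')
#   def check_crlf(fn, lines):
#       for lno, line in enumerate(lines):
#           if line.endswith('\r\n'):
#               yield lno + 1, "DOS line ending"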
name_mail_re = r'[\w ]+(<.*?>)?'
copyright_re = re.compile(r'^ :copyright: Copyright 2006-2012 by '
r'the Pygments team, see AUTHORS\.$', re.UNICODE)
copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re), re.UNICODE)
coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
misspellings = ["developement", "adress", "verificate", # ALLOW-MISSPELLING
"informations"] # ALLOW-MISSPELLING
@checker('.py')
def check_syntax(fn, lines):
try:
compile(''.join(lines), fn, "exec")
except SyntaxError, err:
yield 0, "not compilable: %s" % err
@checker('.py')
def check_style_and_encoding(fn, lines):
encoding = 'ascii'
for lno, line in enumerate(lines):
if len(line) > 90:
yield lno+1, "line too long"
m = not_ix_re.search(line)
if m:
yield lno+1, '"' + m.group() + '"'
if is_const_re.search(line):
yield lno+1, 'using == None/True/False'
if lno < 2:
co = coding_re.search(line)
if co:
encoding = co.group(1)
try:
line.decode(encoding)
except UnicodeDecodeError, err:
yield lno+1, "not decodable: %s\n Line: %r" % (err, line)
except LookupError, err:
yield 0, "unknown encoding: %s" % encoding
encoding = 'latin1'
@checker('.py', only_pkg=True)
def check_fileheader(fn, lines):
# line number correction
c = 1
if lines[0:1] == ['#!/usr/bin/env python\n']:
lines = lines[1:]
c = 2
llist = []
docopen = False
for lno, l in enumerate(lines):
llist.append(l)
if lno == 0:
if l == '# -*- coding: rot13 -*-\n':
# special-case pony package
return
elif l != '# -*- coding: utf-8 -*-\n':
yield 1, "missing coding declaration"
elif lno == 1:
if l != '"""\n' and l != 'r"""\n':
yield 2, 'missing docstring begin (""")'
else:
docopen = True
elif docopen:
if l == '"""\n':
# end of docstring
if lno <= 4:
yield lno+c, "missing module name in docstring"
break
if l != "\n" and l[:4] != ' ' and docopen:
yield lno+c, "missing correct docstring indentation"
if lno == 2:
# if not in package, don't check the module name
modname = fn[:-3].replace('/', '.').replace('.__init__', '')
while modname:
if l.lower()[4:-1] == modname:
break
modname = '.'.join(modname.split('.')[1:])
else:
yield 3, "wrong module name in docstring heading"
modnamelen = len(l.strip())
elif lno == 3:
if l.strip() != modnamelen * "~":
yield 4, "wrong module name underline, should be ~~~...~"
else:
yield 0, "missing end and/or start of docstring..."
# check for copyright and license fields
license = llist[-2:-1]
if license != [" :license: BSD, see LICENSE for details.\n"]:
yield 0, "no correct license info"
ci = -3
copyright = [s.decode('utf-8') for s in llist[ci:ci+1]]
while copyright and copyright_2_re.match(copyright[0]):
ci -= 1
copyright = llist[ci:ci+1]
if not copyright or not copyright_re.match(copyright[0]):
yield 0, "no correct copyright info"
@checker('.py', '.html', '.js')
def check_whitespace_and_spelling(fn, lines):
for lno, line in enumerate(lines):
if "\t" in line:
yield lno+1, "OMG TABS!!!1 "
if line[:-1].rstrip(' \t') != line[:-1]:
yield lno+1, "trailing whitespace"
for word in misspellings:
if word in line and 'ALLOW-MISSPELLING' not in line:
yield lno+1, '"%s" used' % word
bad_tags = ('<b>', '<i>', '<u>', '<s>', '<strike>',
'<center>', '<big>', '<small>', '<font')
@checker('.html')
def check_xhtml(fn, lines):
for lno, line in enumerate(lines):
for bad_tag in bad_tags:
if bad_tag in line:
yield lno+1, "used " + bad_tag
def main(argv):
try:
gopts, args = getopt.getopt(argv[1:], "vi:")
except getopt.GetoptError:
print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
return 2
opts = {}
for opt, val in gopts:
if opt == '-i':
val = abspath(val)
opts.setdefault(opt, []).append(val)
if len(args) == 0:
path = '.'
elif len(args) == 1:
path = args[0]
else:
print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
return 2
verbose = '-v' in opts
num = 0
out = cStringIO.StringIO()
# TODO: replace os.walk run with iteration over output of
# `svn list -R`.
for root, dirs, files in os.walk(path):
if '.svn' in dirs:
dirs.remove('.svn')
if '-i' in opts and abspath(root) in opts['-i']:
del dirs[:]
continue
# XXX: awkward: for the Makefile call: don't check non-package
# files for file headers
in_pocoo_pkg = root.startswith('./pygments')
for fn in files:
fn = join(root, fn)
if fn[:2] == './': fn = fn[2:]
if '-i' in opts and abspath(fn) in opts['-i']:
continue
ext = splitext(fn)[1]
checkerlist = checkers.get(ext, None)
if not checkerlist:
continue
if verbose:
print "Checking %s..." % fn
try:
f = open(fn, 'r')
lines = list(f)
except (IOError, OSError), err:
print "%s: cannot open: %s" % (fn, err)
num += 1
continue
for checker in checkerlist:
if not in_pocoo_pkg and checker.only_pkg:
continue
for lno, msg in checker(fn, lines):
print >>out, "%s:%d: %s" % (fn, lno, msg)
num += 1
if verbose:
print
if num == 0:
print "No errors found."
else:
print out.getvalue().rstrip('\n')
print "%d error%s found." % (num, num > 1 and "s" or "")
return int(num > 0)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-2-clause |
stormi/tsunami | src/secondaires/navigation/commandes/pavillon/montrer.py | 1 | 3737 | # -*-coding:Utf-8 -*
# Copyright (c) 2014 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'montrer' de la commande 'pavillon'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmMontrer(Parametre):
"""Commande 'pavillon montrer'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "montrer", "show")
self.schema = "<nom_objet>"
self.aide_courte = "montre le pavillon"
self.aide_longue = \
"Cette commande est assez identique à %pavillon% " \
"%pavillon:hisser%, mais au lieu de hisser le pavillon " \
"en tête de mât, elle le montre simplement pour être " \
"visible des autres navires. Cette commande permet donc " \
"de faire des signaux."
def ajouter(self):
"""Méthode appelée lors de l'ajout de la commande à l'interpréteur"""
nom_objet = self.noeud.get_masque("nom_objet")
nom_objet.proprietes["conteneurs"] = \
"(personnage.equipement.inventaire_simple.iter_objets_qtt(" \
"True), )"
nom_objet.proprietes["quantite"] = "True"
nom_objet.proprietes["conteneur"] = "True"
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
personnage.agir("poser")
salle = personnage.salle
if not hasattr(salle, "navire") or salle.navire is None or \
salle.navire.etendue is None:
personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
return
navire = salle.navire
objets = list(dic_masques["nom_objet"].objets_qtt_conteneurs)
objets = [c[0] for c in objets]
pavillon = objets[0]
if not pavillon.est_de_type("pavillon"):
personnage << "|err|{} n'est pas un pavillon.|ff|".format(
pavillon.get_nom().capitalize())
return
navire.envoyer("{} est agité dans les airs.".format(
pavillon.get_nom().capitalize()))
navire.envoyer_autour("{} est agité sur " \
"{}.".format(pavillon.get_nom().capitalize(),
navire.desc_survol), 35)
| bsd-3-clause |
adobecs5/urp2015 | lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py | 375 | 5760 | from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:[email protected]:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" we want things to happen with empty strings (or 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementation does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
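if __name__ == '__main__':
    # Illustrative sketch only, not part of upstream urllib3: round-trip an
    # arbitrary example URL and show the derived properties.
    example = parse_url('http://user:[email protected]:8080/path?q=1#frag')
    print(example)              # Url(scheme='http', auth='user:pw', host='example.com', ...)
    print(example.request_uri)  # '/path?q=1'
    print(example.netloc)       # 'example.com:8080'
    print(get_host('https://example.com/mail/'))  # ('https', 'example.com', None)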
| apache-2.0 |
snakeleon/YouCompleteMe-x86 | third_party/ycmd/third_party/JediHTTP/vendor/jedi/test/test_speed.py | 24 | 1870 | """
Speed tests of Jedi. To prove that certain things don't take longer than they
should.
"""
import time
import functools
from .helpers import TestCase, cwd_at
import jedi
class TestSpeed(TestCase):
def _check_speed(time_per_run, number=4, run_warm=True):
""" Speed checks should typically be very tolerant. Some machines are
faster than others, but the tests should still pass. These tests are
here to assure that certain effects that kill jedi performance are not
reintroduced to Jedi."""
def decorated(func):
@functools.wraps(func)
def wrapper(self):
if run_warm:
func(self)
first = time.time()
for i in range(number):
func(self)
single_time = (time.time() - first) / number
print('\nspeed', func, single_time)
assert single_time < time_per_run
return wrapper
return decorated
@_check_speed(0.2)
def test_os_path_join(self):
s = "from posixpath import join; join('', '')."
assert len(jedi.Script(s).completions()) > 10 # is a str completion
@_check_speed(0.15)
def test_scipy_speed(self):
s = 'import scipy.weave; scipy.weave.inline('
script = jedi.Script(s, 1, len(s), '')
script.call_signatures()
#print(jedi.imports.imports_processed)
@_check_speed(0.8)
@cwd_at('test')
def test_precedence_slowdown(self):
"""
Precedence calculation can slow down things significantly in edge
cases. Having strange recursion structures increases the problem.
"""
with open('speed/precedence.py') as f:
line = len(f.read().splitlines())
assert jedi.Script(line=line, path='speed/precedence.py').goto_definitions()
| gpl-3.0 |
chetan/ansible | lib/ansible/module_utils/ec2.py | 19 | 7219 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# os and boto are needed by the connection helpers below; modules that embed
# this snippet have historically imported them themselves.
import os
import boto
import boto.ec2
try:
from distutils.version import LooseVersion
HAS_LOOSE_VERSION = True
except:
HAS_LOOSE_VERSION = False
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
def aws_common_argument_spec():
spec = dict(
ec2_url=dict(),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
validate_certs=dict(default=True, type='bool'),
security_token=dict(no_log=True),
profile=dict(),
)
return spec
def ec2_argument_spec():
spec = aws_common_argument_spec()
spec.update(
dict(
region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
)
)
return spec
def boto_supports_profile_name():
return hasattr(boto.ec2.EC2Connection, 'profile_name')
def get_aws_connection_info(module):
# Check module args for credentials, then check environment vars
# access_key
ec2_url = module.params.get('ec2_url')
access_key = module.params.get('aws_access_key')
secret_key = module.params.get('aws_secret_key')
security_token = module.params.get('security_token')
region = module.params.get('region')
profile_name = module.params.get('profile')
validate_certs = module.params.get('validate_certs')
if not ec2_url:
if 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
elif 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
if not access_key:
if 'EC2_ACCESS_KEY' in os.environ:
access_key = os.environ['EC2_ACCESS_KEY']
elif 'AWS_ACCESS_KEY_ID' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif 'AWS_ACCESS_KEY' in os.environ:
access_key = os.environ['AWS_ACCESS_KEY']
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
if 'EC2_SECRET_KEY' in os.environ:
secret_key = os.environ['EC2_SECRET_KEY']
elif 'AWS_SECRET_ACCESS_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif 'AWS_SECRET_KEY' in os.environ:
secret_key = os.environ['AWS_SECRET_KEY']
else:
# in case secret_key came in as empty string
secret_key = None
if not region:
if 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
elif 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
else:
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
if not region:
region = boto.config.get('Boto', 'ec2_region')
if not security_token:
if 'AWS_SECURITY_TOKEN' in os.environ:
security_token = os.environ['AWS_SECURITY_TOKEN']
else:
# in case security_token came in as empty string
security_token = None
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
# profile_name only works as a key in boto >= 2.24
# so only set profile_name if passed as an argument
if profile_name:
if not boto_supports_profile_name():
module.fail_json("boto does not support profile_name before 2.24")
boto_params['profile_name'] = profile_name
if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
boto_params['validate_certs'] = validate_certs
return region, ec2_url, boto_params
def get_ec2_creds(module):
''' for compatibility mode with old modules that don't/can't yet
use ec2_connect method '''
region, ec2_url, boto_params = get_aws_connection_info(module)
return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
def boto_fix_security_token_in_profile(conn, profile_name):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + profile_name
if boto.config.has_option(profile, 'aws_security_token'):
conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
return conn
def connect_to_aws(aws_module, region, **params):
conn = aws_module.connect_to_region(region, **params)
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
def ec2_connect(module):
""" Return an ec2 connection"""
region, ec2_url, boto_params = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
return ec2
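# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original snippet. A module that
# embeds these helpers would typically extend ec2_argument_spec() with its own
# options and then call ec2_connect(). AnsibleModule comes from
# ansible.module_utils.basic; the 'instance_id' option below is hypothetical.
#
#   from ansible.module_utils.basic import AnsibleModule
#
#   def main():
#       argument_spec = ec2_argument_spec()
#       argument_spec.update(dict(instance_id=dict(required=True)))
#       module = AnsibleModule(argument_spec=argument_spec)
#       ec2 = ec2_connect(module)
#       reservations = ec2.get_all_instances([module.params['instance_id']])
#       module.exit_json(changed=False, count=len(reservations))
#
#   if __name__ == '__main__':
#       main()
# ---------------------------------------------------------------------------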
| gpl-3.0 |
Benster900/mhn | server/mhn/api/models.py | 12 | 8796 | import string
from random import choice
from datetime import datetime
from sqlalchemy import UniqueConstraint, func
from mhn import db
from mhn.api import APIModel
from mhn.auth.models import User
from mhn.common.clio import Clio
class Sensor(db.Model, APIModel):
# Defines some properties on the fields:
# required: Is required for creating object via
# a POST request.
# editable: Can be edited via a PUT request.
all_fields = {
'uuid': {'required': False, 'editable': False},
'name': {'required': True, 'editable': True},
'created_date': {'required': False, 'editable': False},
'ip': {'required': False, 'editable': False},
'hostname': {'required': True, 'editable': True},
'honeypot': {'required': True, 'editable': False}
}
__tablename__ = 'sensors'
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.String(36), unique=True)
name = db.Column(db.String(50))
created_date = db.Column(
db.DateTime(), default=datetime.utcnow)
ip = db.Column(db.String(15))
hostname = db.Column(db.String(50))
identifier = db.Column(db.String(50), unique=True)
honeypot = db.Column(db.String(50))
def __init__(
self, uuid=None, name=None, created_date=None, honeypot=None,
ip=None, hostname=None, identifier=None, **args):
self.uuid = uuid
self.name = name
self.created_date = created_date
self.ip = ip
self.hostname = hostname
self.identifier = identifier
self.honeypot = honeypot
def __repr__(self):
return '<Sensor>{}'.format(self.to_dict())
def to_dict(self):
return dict(
uuid=self.uuid, name=self.name, honeypot=self.honeypot,
created_date=str(self.created_date), ip=self.ip,
hostname=self.hostname, identifier=self.uuid,
# Extending with info from Mnemosyne.
secret=self.authkey.secret, publish=self.authkey.publish)
def new_auth_dict(self):
el = string.ascii_letters + string.digits
rand_str = lambda n: ''.join(choice(el) for _ in range(n))
return dict(secret=rand_str(16),
identifier=self.uuid, honeypot=self.honeypot,
subscribe=[], publish=Sensor.get_channels(self.honeypot))
@property
def attacks_count(self):
return Clio().counts.get_count(identifier=self.uuid)
@property
def authkey(self):
return Clio().authkey.get(identifier=self.uuid)
@staticmethod
def get_channels(honeypot):
from mhn import mhn
return mhn.config.get('HONEYPOT_CHANNELS', {}).get(honeypot, [])
class Rule(db.Model, APIModel):
# Defines some properties on the fields:
# required: Is required for creating object via
# a POST request.
# editable: Can be edited via a PUT request.
# Defaults to False.
all_fields = {
'message': {'required': True, 'editable': True},
'references': {'required': True, 'editable': False},
'classtype': {'required': True, 'editable': True},
'sid': {'required': True, 'editable': False},
'rev': {'required': True, 'editable': True},
'date': {'required': False, 'editable': False},
'rule_format': {'required': True, 'editable': False},
'is_active': {'required': False, 'editable': True},
'notes': {'required': False, 'editable': True}
}
__tablename__ = 'rules'
id = db.Column(db.Integer, primary_key=True)
message = db.Column(db.String(140))
references = db.relationship(
'Reference', backref='rule', lazy='dynamic')
classtype = db.Column(db.String(50))
sid = db.Column(db.Integer)
rev = db.Column(db.Integer)
date = db.Column(db.DateTime(), default=datetime.utcnow)
rule_format = db.Column(db.String(500))
is_active = db.Column(db.Boolean)
notes = db.Column(db.String(140))
__table_args__ = (UniqueConstraint(sid, rev),)
def __init__(self, msg=None, classtype=None, sid=None,
rev=None, date=None, rule_format=None, **args):
self.message = msg
self.classtype = classtype
self.sid = sid
self.rev = rev
self.rule_format = rule_format
self.is_active = True
def insert_refs(self, refs):
for r in refs:
ref = Reference()
ref.rule = self
ref.text = r
db.session.add(ref)
db.session.commit()
def to_dict(self):
return dict(sid=self.sid, rev=self.rev, msg=self.message,
classtype=self.classtype, is_active=self.is_active)
def __repr__(self):
return '<Rule>{}'.format(self.to_dict())
def render(self):
"""
Takes Rule model and renders itself to plain text.
"""
msg = 'msg:"{}"'.format(self.message)
classtype = 'classtype:{}'.format(self.classtype)
sid = 'sid:{}'.format(self.sid)
rev = 'rev:{}'.format(self.rev)
reference = ''
for r in self.references:
reference += 'reference:{}; '.format(r.text)
# Remove trailing '; ' from references.
reference = reference[:-2]
return self.rule_format.format(msg=msg, sid=sid, rev=rev,
classtype=classtype, reference=reference)
@classmethod
def renderall(cls):
"""
Renders latest revision of active rules.
This method must be called within a Flask app
context.
"""
rules = cls.query.filter_by(is_active=True).\
group_by(cls.sid).\
having(func.max(cls.rev))
return '\n\n'.join([ru.render() for ru in rules])
@classmethod
def bulk_import(cls, rulelist):
"""
Imports rules into the database.
This method must be called within a Flask app
context.
"""
cnt = 0
for ru in rulelist:
# Checking for rules with this sid.
if cls.query.\
filter_by(sid=ru['sid']).\
filter(cls.rev >= ru['rev']).count() == 0:
# All rules with this sid have lower rev number that
# the incoming one, or this is a new sid altogether.
rule = cls(**ru)
rule.insert_refs(ru['references'])
db.session.add(rule)
# Disabling older rules.
cls.query.\
filter_by(sid=ru['sid']).\
filter(cls.rev < ru['rev']).\
update({'is_active': False}, False)
cnt += 1
if cnt % 500 == 0:
print 'Imported {} rules so far...'.format(cnt)
print 'Finished Importing {} rules. Committing data'.format(cnt)
db.session.commit()
class RuleSource(db.Model, APIModel):
all_fields = {
'uri': {'required': True, 'editable': True},
'note': {'required': False, 'editable': True},
'name': {'required': True, 'editable': True},
}
__tablename__ = 'rule_sources'
id = db.Column(db.Integer, primary_key=True)
uri = db.Column(db.String(140))
note = db.Column(db.String(140))
name = db.Column(db.String(40))
def __repr__(self):
return '<RuleSource>{}'.format(self.to_dict())
def to_dict(self):
return dict(name=self.name, uri=self.uri, note=self.note)
class Reference(db.Model):
__tablename__ = 'rule_references'
id = db.Column(db.Integer, primary_key=True)
text = db.Column(db.String(140))
rule_id = db.Column(db.Integer,
db.ForeignKey('rules.id'))
class DeployScript(db.Model, APIModel):
all_fields = {
'script': {'required': True, 'editable': True},
'name': {'required': True, 'editable': True},
'date': {'required': False, 'editable': False},
'notes': {'required': True, 'editable': True},
}
__tablename__ = 'deploy_scripts'
id = db.Column(db.Integer, primary_key=True)
script = db.Column(db.String(102400))
date = db.Column(
db.DateTime(), default=datetime.utcnow)
notes = db.Column(db.String(140))
name = db.Column(db.String(140))
user_id = db.Column(db.Integer, db.ForeignKey(User.id))
user = db.relationship(User, uselist=False)
def __init__(self, name=None, script=None, notes=None):
self.name = name
self.script = script
self.notes = notes
def __repr__(self):
return '<DeployScript>{}'.format(self.to_dict())
def to_dict(self):
return dict(script=self.script, date=self.date, notes=self.notes,
user=self.user.email, id=self.id)
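# Illustrative note, not part of the original module: Rule.render() fills the
# stored rule_format template with the msg/sid/rev/classtype/reference fields.
# With a hypothetical Snort-style template such as
#   'alert ip any any -> any any ({msg}; {reference}; {classtype}; {sid}; {rev};)'
# a rule with message='test rule', classtype='trojan-activity', sid=2000001,
# rev=1 and one reference 'url,example.com' would render roughly as
#   alert ip any any -> any any (msg:"test rule"; reference:url,example.com; classtype:trojan-activity; sid:2000001; rev:1;)
# Rule.renderall() then joins the latest active revision of every rule with
# blank lines, presumably the content served to sensors as a rules file.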
| lgpl-2.1 |
lfairchild/PmagPy | dialogs/grid_frame2.py | 2 | 48571 | """
GridFrame -- subclass of wx.Frame. Contains grid and buttons to manipulate it.
GridBuilder -- data methods for GridFrame (add data to frame, save it, etc.)
"""
#import pdb
import wx
from . import drop_down_menus2 as drop_down_menus
from . import pmag_widgets as pw
from . import magic_grid2 as magic_grid
from pmagpy import builder2 as builder
from pmagpy import pmag
from pmagpy.controlled_vocabularies2 import vocab
class GridFrame(wx.Frame):
#class GridFrame(wx.ScrolledWindow):
"""
make_magic
"""
def __init__(self, ErMagic, WD=None, frame_name="grid frame",
panel_name="grid panel", parent=None):
self.parent = parent
wx.GetDisplaySize()
title = 'Edit {} data'.format(panel_name)
#wx.Frame.__init__(self, parent=parent, id=wx.ID_ANY, name=frame_name, title=title)
#wx.ScrolledWindow.__init__(self, parent=parent, id=wx.ID_ANY, name=frame_name)#, title=title)
super(GridFrame, self).__init__(parent=parent, id=wx.ID_ANY, name=frame_name, title=title)
# if controlled vocabularies haven't already been grabbed from earthref
# do so now
if not any(vocab.vocabularies):
vocab.get_all_vocabulary()
self.remove_cols_mode = False
self.deleteRowButton = None
self.selected_rows = set()
self.er_magic = ErMagic
self.panel = wx.Panel(self, name=panel_name, size=wx.GetDisplaySize())
self.grid_type = panel_name
if self.parent:
self.Bind(wx.EVT_WINDOW_DESTROY, self.parent.Parent.on_close_grid_frame)
if self.grid_type == 'age':
ancestry_ind = self.er_magic.ancestry.index(self.er_magic.age_type)
self.child_type = self.er_magic.ancestry[ancestry_ind-1]
self.parent_type = self.er_magic.ancestry[ancestry_ind+1]
else:
try:
child_ind = self.er_magic.ancestry.index(self.grid_type) - 1
self.child_type = self.er_magic.ancestry[child_ind]
parent_ind = self.er_magic.ancestry.index(self.grid_type) + 1
self.parent_type = self.er_magic.ancestry[parent_ind]
except ValueError:
self.child_type = None
self.parent_type = None
self.WD = WD
self.InitUI()
## Initialization functions
def InitUI(self):
"""
initialize window
"""
self.main_sizer = wx.BoxSizer(wx.VERTICAL)
self.init_grid_headers()
self.grid_builder = GridBuilder(self.er_magic, self.grid_type, self.grid_headers,
self.panel, self.parent_type)
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
## Column management buttons
self.add_cols_button = wx.Button(self.panel, label="Add additional columns",
name='add_cols_btn')
self.Bind(wx.EVT_BUTTON, self.on_add_cols, self.add_cols_button)
self.remove_cols_button = wx.Button(self.panel, label="Remove columns",
name='remove_cols_btn')
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
## Row management buttons
self.remove_row_button = wx.Button(self.panel, label="Remove last row",
name='remove_last_row_btn')
self.Bind(wx.EVT_BUTTON, self.on_remove_row, self.remove_row_button)
many_rows_box = wx.BoxSizer(wx.HORIZONTAL)
self.add_many_rows_button = wx.Button(self.panel, label="Add row(s)",
name='add_many_rows_btn')
self.rows_spin_ctrl = wx.SpinCtrl(self.panel, value='1', initial=1,
name='rows_spin_ctrl')
many_rows_box.Add(self.add_many_rows_button, flag=wx.ALIGN_CENTRE)
many_rows_box.Add(self.rows_spin_ctrl)
self.Bind(wx.EVT_BUTTON, self.on_add_rows, self.add_many_rows_button)
self.deleteRowButton = wx.Button(self.panel, id=-1, label='Delete selected row(s)', name='delete_row_btn')
self.Bind(wx.EVT_BUTTON, lambda event: self.on_remove_row(event, False), self.deleteRowButton)
self.deleteRowButton.Disable()
## Data management buttons
self.importButton = wx.Button(self.panel, id=-1,
label='Import MagIC-format file', name='import_btn')
self.Bind(wx.EVT_BUTTON, self.onImport, self.importButton)
self.exitButton = wx.Button(self.panel, id=-1,
label='Save and close grid', name='save_and_quit_btn')
self.Bind(wx.EVT_BUTTON, self.onSave, self.exitButton)
self.cancelButton = wx.Button(self.panel, id=-1, label='Cancel', name='cancel_btn')
self.Bind(wx.EVT_BUTTON, self.onCancelButton, self.cancelButton)
## Help message and button
# button
self.toggle_help_btn = wx.Button(self.panel, id=-1, label="Show help",
name='toggle_help_btn')
self.Bind(wx.EVT_BUTTON, self.toggle_help, self.toggle_help_btn)
# message
self.help_msg_boxsizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='help_msg_boxsizer'), wx.VERTICAL)
self.default_msg_text = 'Edit {} here.\nYou can add or remove both rows and columns, however required columns may not be deleted.\nControlled vocabularies are indicated by **, and will have drop-down-menus.\nTo edit all values in a column, click the column header.\nYou can cut and paste a block of cells from an Excel-like file.\nJust click the top left cell and use command "v".\nColumns that pertain to interpretations will be marked with "++".'.format(self.grid_type + 's')
txt = ''
if self.grid_type == 'location':
txt = '\n\nNote: you can fill in location start/end latitude/longitude here.\nHowever, if you add sites in step 2, the program will calculate those values automatically,\nbased on site latitudes/longitudes.\nThese values will be written to your upload file.'
if self.grid_type == 'sample':
txt = "\n\nNote: you can fill in lithology, class, and type for each sample here.\nHowever, if the sample's class, lithology, and type are the same as its parent site,\nthose values will propagate down, and will be written to your sample file automatically."
if self.grid_type == 'specimen':
txt = "\n\nNote: you can fill in lithology, class, and type for each specimen here.\nHowever, if the specimen's class, lithology, and type are the same as its parent sample,\nthose values will propagate down, and will be written to your specimen file automatically."
if self.grid_type == 'age':
txt = "\n\nNote: only ages for which you provide data will be written to your upload file."
self.default_msg_text += txt
self.msg_text = wx.StaticText(self.panel, label=self.default_msg_text,
style=wx.TE_CENTER, name='msg text')
self.help_msg_boxsizer.Add(self.msg_text)
self.help_msg_boxsizer.ShowItems(False)
## Code message and button
# button
self.toggle_codes_btn = wx.Button(self.panel, id=-1, label="Show method codes",
name='toggle_codes_btn')
self.Bind(wx.EVT_BUTTON, self.toggle_codes, self.toggle_codes_btn)
# message
self.code_msg_boxsizer = pw.MethodCodeDemystifier(self.panel, vocab)
self.code_msg_boxsizer.ShowItems(False)
## Add content to sizers
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
col_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Columns',
name='manage columns'), wx.VERTICAL)
row_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Rows',
name='manage rows'), wx.VERTICAL)
main_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Manage data',
name='manage data'), wx.VERTICAL)
col_btn_vbox.Add(self.add_cols_button, 1, flag=wx.ALL, border=5)
col_btn_vbox.Add(self.remove_cols_button, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(many_rows_box, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.remove_row_button, 1, flag=wx.ALL, border=5)
row_btn_vbox.Add(self.deleteRowButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.importButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.exitButton, 1, flag=wx.ALL, border=5)
main_btn_vbox.Add(self.cancelButton, 1, flag=wx.ALL, border=5)
self.hbox.Add(col_btn_vbox, 1)
self.hbox.Add(row_btn_vbox, 1)
self.hbox.Add(main_btn_vbox, 1)
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.panel.Bind(wx.EVT_TEXT_PASTE, self.do_fit)
# add actual data!
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
if self.grid_type == 'age':
self.grid_builder.add_age_data_to_grid()
# add drop_down menus
if self.parent_type:
belongs_to = sorted(self.er_magic.data_lists[self.parent_type][0], key=lambda item: item.name)
else:
belongs_to = ''
self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self, self.grid, belongs_to)
self.grid_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='grid container'), wx.VERTICAL)
self.grid_box.Add(self.grid, 1, flag=wx.ALL|wx.EXPAND, border=5)
# a few special touches if it is a location grid
if self.grid_type == 'location':
lat_lon_dict = self.er_magic.get_min_max_lat_lon(self.er_magic.locations)
for loc in self.er_magic.locations:
# try to fill in min/max latitudes/longitudes from sites
d = lat_lon_dict[loc.name]
col_labels = [self.grid.GetColLabelValue(col) for col in range(self.grid.GetNumberCols())]
row_labels = [self.grid.GetCellValue(row, 0) for row in range(self.grid.GetNumberRows())]
for key, value in list(d.items()):
if value:
if str(loc.er_data[key]) == str(value):
# no need to update
pass
else:
# update
loc.er_data[key] = value
col_ind = col_labels.index(key)
row_ind = row_labels.index(loc.name)
self.grid.SetCellValue(row_ind, col_ind, str(value))
if not self.grid.changes:
self.grid.changes = set([row_ind])
else:
self.grid.changes.add(row_ind)
# a few special touches if it is an age grid
if self.grid_type == 'age':
self.remove_row_button.Disable()
self.add_many_rows_button.Disable()
self.grid.SetColLabelValue(0, 'er_site_name')
toggle_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Ages level', name='Ages level'), wx.VERTICAL)
levels = ['specimen', 'sample', 'site', 'location']
age_level = pw.radio_buttons(self.panel, levels, 'Choose level to assign ages')
level_ind = levels.index(self.er_magic.age_type)
age_level.radio_buttons[level_ind].SetValue(True)
toggle_box.Add(age_level)
self.Bind(wx.EVT_RADIOBUTTON, self.toggle_ages)
self.hbox.Add(toggle_box)
# a few special touches if it is a result grid
if self.grid_type == 'result':
# populate specimen_names, sample_names, etc.
self.drop_down_menu.choices[2] = [sorted([spec.name for spec in self.er_magic.specimens if spec]), False]
self.drop_down_menu.choices[3] = [sorted([samp.name for samp in self.er_magic.samples if samp]), False]
self.drop_down_menu.choices[4] = [sorted([site.name for site in self.er_magic.sites if site]), False]
self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False]
for row in range(self.grid.GetNumberRows()):
result_name = self.grid.GetCellValue(row, 0)
result = self.er_magic.find_by_name(result_name, self.er_magic.results)
if result:
if result.specimens:
self.grid.SetCellValue(row, 2, ' : '.join([pmag.get_attr(spec) for spec in result.specimens]))
if result.samples:
self.grid.SetCellValue(row, 3, ' : '.join([pmag.get_attr(samp) for samp in result.samples]))
if result.sites:
self.grid.SetCellValue(row, 4, ' : '.join([pmag.get_attr(site) for site in result.sites]))
if result.locations:
self.grid.SetCellValue(row, 5, ' : '.join([pmag.get_attr(loc) for loc in result.locations]))
self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False]
# final layout, set size
self.main_sizer.Add(self.hbox, flag=wx.ALL|wx.ALIGN_CENTER|wx.SHAPED, border=20)
self.main_sizer.Add(self.toggle_help_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.help_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=10)
self.main_sizer.Add(self.toggle_codes_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.code_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5)
self.main_sizer.Add(self.grid_box, 2, flag=wx.ALL|wx.EXPAND, border=10)
self.panel.SetSizer(self.main_sizer)
self.main_sizer.Fit(self)
## this keeps sizing correct if the user resizes the window manually
#self.Bind(wx.EVT_SIZE, self.do_fit)
self.Centre()
self.Show()
def on_key_down(self, event):
"""
If the user presses command-V, re-size the window in case pasting has changed the content size.
"""
keycode = event.GetKeyCode()
meta_down = event.MetaDown() or event.GetCmdDown()
if keycode == 86 and meta_down:  # 86 == ord('V'), i.e. a cmd/ctrl-V paste
# treat it as if it were a wx.EVT_TEXT_PASTE (which is bound to do_fit above)
self.do_fit(event)
def do_fit(self, event):
"""
Re-fit the window to the size of the content.
"""
#self.grid.ShowScrollbars(wx.SHOW_SB_NEVER, wx.SHOW_SB_NEVER)
if event:
event.Skip()
self.main_sizer.Fit(self)
disp_size = wx.GetDisplaySize()
actual_size = self.GetSize()
rows = self.grid.GetNumberRows()
# if there isn't enough room to display new content
# resize the frame
if disp_size[1] - 75 < actual_size[1]:
self.SetSize((actual_size[0], disp_size[1] * .95))
self.Centre()
def toggle_help(self, event, mode=None):
# if mode == 'open', show no matter what.
# if mode == 'close', close. otherwise, change state
btn = self.toggle_help_btn
shown = self.help_msg_boxsizer.GetStaticBox().IsShown()
# if mode is specified, do that mode
if mode == 'open':
self.help_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide help')
elif mode == 'close':
self.help_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show help')
# otherwise, simply toggle states
else:
if shown:
self.help_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show help')
else:
self.help_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide help')
self.do_fit(None)
def toggle_codes(self, event):
btn = event.GetEventObject()
if btn.Label == 'Show method codes':
self.code_msg_boxsizer.ShowItems(True)
btn.SetLabel('Hide method codes')
else:
self.code_msg_boxsizer.ShowItems(False)
btn.SetLabel('Show method codes')
self.do_fit(None)
def toggle_ages(self, event):
"""
Switch the type of grid between site/sample
(Users may add ages at either level)
"""
if self.grid.changes:
self.onSave(None)
label = event.GetEventObject().Label
self.er_magic.age_type = label
self.grid.Destroy()
# normally grid_frame is reset to None when grid is destroyed
# in this case we are simply replacing the grid, so we need to
# reset grid_frame
self.parent.Parent.grid_frame = self
self.parent.Parent.Hide()
self.grid = self.grid_builder.make_grid()
self.grid.InitUI()
self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid)
self.grid_builder.add_data_to_grid(self.grid, self.grid_type)
self.grid_builder.add_age_data_to_grid()
self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self, self.grid, None)
self.grid.SetColLabelValue(0, 'er_' + label + '_name')
self.grid.size_grid()
self.grid_box.Add(self.grid, flag=wx.ALL, border=5)
self.main_sizer.Fit(self)
if self.parent.Parent.validation_mode:
if 'age' in self.parent.Parent.validation_mode:
self.grid.paint_invalid_cells(self.parent.Parent.warn_dict['age'])
self.grid.ForceRefresh()
# the grid won't show up if it's the same size as the previous grid
# awkward solution (causes flashing):
if self.grid.Size[0] < 100:
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.main_sizer.Fit(self)
def init_grid_headers(self):
self.grid_headers = self.er_magic.headers
## Grid event methods
def remove_col_label(self, event):#, include_pmag=True):
"""
check to see if column is required
if it is not, delete it from grid
"""
er_possible_headers = self.grid_headers[self.grid_type]['er'][2]
pmag_possible_headers = self.grid_headers[self.grid_type]['pmag'][2]
er_actual_headers = self.grid_headers[self.grid_type]['er'][0]
pmag_actual_headers = self.grid_headers[self.grid_type]['pmag'][0]
col = event.GetCol()
label = self.grid.GetColLabelValue(col)
if '**' in label:
label = label.strip('**')
if label in self.grid_headers[self.grid_type]['er'][1]:
pw.simple_warning("That header is required, and cannot be removed")
return False
#elif include_pmag and label in self.grid_headers[self.grid_type]['pmag'][1]:
# pw.simple_warning("That header is required, and cannot be removed")
# return False
else:
print('That header is not required:', label)
self.grid.remove_col(col)
#if label in er_possible_headers:
try:
print('removing {} from er_actual_headers'.format(label))
er_actual_headers.remove(label)
except ValueError:
pass
#if label in pmag_possible_headers:
try:
pmag_actual_headers.remove(label)
except ValueError:
pass
# causes resize on each column header delete
# can leave this out if we want.....
self.main_sizer.Fit(self)
def on_add_cols(self, event):
"""
Show simple dialog that allows user to add a new column name
"""
col_labels = self.grid.col_labels
# do not list headers that are already column labels in the grid
er_items = [head for head in self.grid_headers[self.grid_type]['er'][2] if head not in col_labels]
# remove unneeded headers
er_items = builder.remove_list_headers(er_items)
pmag_headers = sorted(list(set(self.grid_headers[self.grid_type]['pmag'][2]).union(self.grid_headers[self.grid_type]['pmag'][1])))
# do not list headers that are already column labels in the grid
# make sure that pmag_specific columns are marked with '++'
to_add = [i + '++' for i in self.er_magic.double if i in pmag_headers and i + '++' not in col_labels]
pmag_headers.extend(to_add)
pmag_items = [head for head in pmag_headers if head not in er_items and head not in col_labels]
# remove unneeded headers
pmag_items = sorted(builder.remove_list_headers(pmag_items))
dia = pw.HeaderDialog(self, 'columns to add', items1=er_items, items2=pmag_items)
dia.Centre()
result = dia.ShowModal()
new_headers = []
if result == 5100:
new_headers = dia.text_list
if not new_headers:
return
errors = self.add_new_grid_headers(new_headers, er_items, pmag_items)
if errors:
errors_str = ', '.join(errors)
pw.simple_warning('You are already using the following headers: {}\nSo they will not be added'.format(errors_str))
# problem: if widgets above the grid are too wide,
# the grid does not re-size when adding columns
# awkward solution (causes flashing):
if self.grid.GetWindowStyle() != wx.DOUBLE_BORDER:
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.NO_BORDER)
self.Centre()
self.main_sizer.Fit(self)
#
self.grid.changes = set(range(self.grid.GetNumberRows()))
dia.Destroy()
def add_new_grid_headers(self, new_headers, er_items, pmag_items):
"""
Add in all user-added headers.
If those new headers depend on other headers, add the other headers too.
"""
def add_pmag_reqd_headers():
if self.grid_type == 'result':
return []
add_in = []
col_labels = self.grid.col_labels
for reqd_head in self.grid_headers[self.grid_type]['pmag'][1]:
if reqd_head in self.er_magic.double:
if reqd_head + "++" not in col_labels:
add_in.append(reqd_head + "++")
else:
if reqd_head not in col_labels:
add_in.append(reqd_head)
add_in = builder.remove_list_headers(add_in)
return add_in
#
already_present = []
for name in new_headers:
if name:
if name not in self.grid.col_labels:
col_number = self.grid.add_col(name)
# add to appropriate headers list
if name in er_items:
self.grid_headers[self.grid_type]['er'][0].append(str(name))
if name in pmag_items:
name = name.strip('++')
if name not in self.grid_headers[self.grid_type]['pmag'][0]:
self.grid_headers[self.grid_type]['pmag'][0].append(str(name))
# add any required pmag headers that are not in the grid already
for header in add_pmag_reqd_headers():
col_number = self.grid.add_col(header)
# add drop_down_menus for added reqd columns
if header in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, header)  # use the required header, not the user-added name
if header in ['magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, header)
# add drop down menus for user-added column
if name in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_number, name)
if name in ['magic_method_codes', 'magic_method_codes++']:
self.drop_down_menu.add_method_drop_down(col_number, name)
else:
already_present.append(name)
#pw.simple_warning('You are already using column header: {}'.format(name))
return already_present
def on_remove_cols(self, event):
"""
enter 'remove columns' mode
"""
# open the help message
self.toggle_help(event=None, mode='open')
# first unselect any selected cols/cells
self.remove_cols_mode = True
self.grid.ClearSelection()
self.remove_cols_button.SetLabel("end delete column mode")
# change button to exit the delete columns mode
self.Unbind(wx.EVT_BUTTON, self.remove_cols_button)
self.Bind(wx.EVT_BUTTON, self.exit_col_remove_mode, self.remove_cols_button)
# then disable all other buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
btn.Disable()
# then make some visual changes
self.msg_text.SetLabel("Remove grid columns: click on a column header to delete it. Required headers for {}s may not be deleted.".format(self.grid_type))
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
self.grid.SetWindowStyle(wx.DOUBLE_BORDER)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DOUBLE_BORDER)
self.grid.Refresh()
self.main_sizer.Fit(self) # might not need this one
self.grid.changes = set(range(self.grid.GetNumberRows()))
def on_add_rows(self, event):
"""
add rows to grid
"""
num_rows = self.rows_spin_ctrl.GetValue()
#last_row = self.grid.GetNumberRows()
for row in range(num_rows):
self.grid.add_row()
#if not self.grid.changes:
# self.grid.changes = set([])
#self.grid.changes.add(last_row)
#last_row += 1
self.main_sizer.Fit(self)
def on_remove_row(self, event, row_num=-1):
"""
Remove specified grid row.
If no row number is given, remove the last row.
"""
if row_num == -1:
default = (255, 255, 255, 255)
# unhighlight any selected rows:
for row in self.selected_rows:
attr = wx.grid.GridCellAttr()
attr.SetBackgroundColour(default)
self.grid.SetRowAttr(row, attr)
row_num = self.grid.GetNumberRows() - 1
self.deleteRowButton.Disable()
self.selected_rows = {row_num}
function_mapping = {'specimen': self.er_magic.delete_specimen,
'sample': self.er_magic.delete_sample,
'site': self.er_magic.delete_site,
'location': self.er_magic.delete_location,
'result': self.er_magic.delete_result}
names = [self.grid.GetCellValue(row, 0) for row in self.selected_rows]
orphans = []
for name in names:
if name:
try:
row = self.grid.row_labels.index(name)
function_mapping[self.grid_type](name)
orphans.extend([name])
# if user entered a name, then deletes the row before saving,
# there will be a ValueError
except ValueError:
pass
self.grid.remove_row(row)
self.selected_rows = set()
self.deleteRowButton.Disable()
self.grid.Refresh()
self.main_sizer.Fit(self)
def exit_col_remove_mode(self, event):
"""
go back from 'remove cols' mode to normal
"""
# close help message
self.toggle_help(event=None, mode='close')
# update mode
self.remove_cols_mode = False
# re-enable all buttons
for btn in [self.add_cols_button, self.remove_row_button, self.add_many_rows_button]:
btn.Enable()
# unbind grid click for deletion
self.Unbind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK)
# undo visual cues
self.grid.SetWindowStyle(wx.DEFAULT)
self.grid_box.GetStaticBox().SetWindowStyle(wx.DEFAULT)
self.msg_text.SetLabel(self.default_msg_text)
self.help_msg_boxsizer.Fit(self.help_msg_boxsizer.GetStaticBox())
self.main_sizer.Fit(self)
# re-bind self.remove_cols_button
self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button)
self.remove_cols_button.SetLabel("Remove columns")
def onSelectRow(self, event):
"""
Highlight or unhighlight a row for possible deletion.
"""
grid = self.grid
row = event.Row
default = (255, 255, 255, 255)
highlight = (191, 216, 216, 255)
cell_color = grid.GetCellBackgroundColour(row, 0)
attr = wx.grid.GridCellAttr()
if cell_color == default:
attr.SetBackgroundColour(highlight)
self.selected_rows.add(row)
else:
attr.SetBackgroundColour(default)
try:
self.selected_rows.remove(row)
except KeyError:
pass
if self.selected_rows and self.deleteRowButton:
self.deleteRowButton.Enable()
else:
self.deleteRowButton.Disable()
grid.SetRowAttr(row, attr)
grid.Refresh()
def onLeftClickLabel(self, event):
"""
When user clicks on a grid label, determine if it is a row label or a col label.
Pass along the event to the appropriate function.
(It will either highlight a column for editing all values, or highlight a row for deletion).
"""
if event.Col == -1 and event.Row == -1:
pass
if event.Row < 0:
if self.remove_cols_mode:
self.remove_col_label(event)
else:
self.drop_down_menu.on_label_click(event)
else:
if event.Col < 0 and self.grid_type != 'age':
self.onSelectRow(event)
## Meta buttons -- cancel & save functions
def onImport(self, event):
openFileDialog = wx.FileDialog(self, "Open MagIC-format file", self.WD, "",
"MagIC file|*.*", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
result = openFileDialog.ShowModal()
if result == wx.ID_OK:
if self.grid_type == 'age':
import_type = 'age'
parent_type = None
filename = openFileDialog.GetPath()
file_type = self.er_magic.get_age_info(filename)
import_type = file_type.split('_')[1][:-1]
elif self.grid_type == 'result':
import_type = 'result'
parent_type = None
try:
filename = openFileDialog.GetPath()
self.er_magic.get_results_info(filename)
except Exception as ex:
print('-W- ', ex)
print('-W- Could not read file:\n{}\nFile may be corrupted, or may not be a results format file.'.format(filename))
pw.simple_warning('Could not read file:\n{}\nFile may be corrupted, or may not be a results format file.'.format(filename))
return
else:
parent_ind = self.er_magic.ancestry.index(self.grid_type)
parent_type = self.er_magic.ancestry[parent_ind+1]
# get filename and file data
filename = openFileDialog.GetPath()
import_type = self.er_magic.get_magic_info(self.grid_type, parent_type,
filename=filename, sort_by_file_type=True)
# add any additional headers to the grid, while preserving all old headers
current_headers = self.grid_headers[self.grid_type]['er'][0]
self.er_magic.init_actual_headers()
er_headers = list(set(self.er_magic.headers[self.grid_type]['er'][0]).union(current_headers))
self.er_magic.headers[self.grid_type]['er'][0] = er_headers
include_pmag = False
if 'pmag' in filename and import_type == self.grid_type:
include_pmag = True
elif 'pmag' in filename and import_type != self.grid_type:
self.er_magic.incl_pmag_data.add(import_type)
#
if include_pmag:
pmag_headers = self.er_magic.headers[self.grid_type]['pmag'][0]
headers = set(er_headers).union(pmag_headers)
else:
headers = er_headers
for head in sorted(list(headers)):
if head not in self.grid.col_labels:
col_num = self.grid.add_col(head)
if head in vocab.possible_vocabularies:
self.drop_down_menu.add_drop_down(col_num, head)
# add age data
if import_type == 'age' and self.grid_type == 'age':
self.grid_builder.add_age_data_to_grid()
self.grid.size_grid()
self.main_sizer.Fit(self)
elif import_type == self.grid_type:
self.grid_builder.add_data_to_grid(self.grid, import_type)
self.grid.size_grid()
self.main_sizer.Fit(self)
# if imported data will not show up in current grid,
# warn user
else:
pw.simple_warning('You have imported a {} type file.\nYou\'ll need to open up your {} grid to see the added data'.format(import_type, import_type))
def onCancelButton(self, event):
if self.grid.changes:
dlg1 = wx.MessageDialog(self,caption="Message:", message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ", style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
dlg1.Destroy()
self.Destroy()
else:
self.Destroy()
def onSave(self, event):#, age_data_type='site'):
# first, see if there's any pmag_* data
# set er_magic.incl_pmag_data accordingly
pmag_header_found = False
actual_er_headers = self.er_magic.headers[self.grid_type]['er'][0]
actual_pmag_headers = self.er_magic.headers[self.grid_type]['pmag'][0]
for col in self.grid.col_labels:
if col not in actual_er_headers:
if col in actual_pmag_headers or col == 'magic_method_codes++':
pmag_header_found = True
break
if pmag_header_found:
self.er_magic.incl_pmag_data.add(self.grid_type)
else:
try:
self.er_magic.incl_pmag_data.remove(self.grid_type)
except KeyError:
pass
# then, tidy up drop_down menu
if self.drop_down_menu:
self.drop_down_menu.clean_up()
# then save actual data
self.grid_builder.save_grid_data()
if not event:
return
# then alert user
wx.MessageBox('Saved!', 'Info',
style=wx.OK | wx.ICON_INFORMATION)
self.Destroy()
class GridBuilder(object):
"""
Takes ErMagicBuilder data and puts it into a MagicGrid
"""
def __init__(self, er_magic, grid_type, grid_headers, panel, parent_type=None):
self.er_magic = er_magic
self.grid_type = grid_type
self.grid_headers = grid_headers
self.panel = panel
self.parent_type = parent_type
self.grid = None
def make_grid(self, incl_pmag=True):
"""
return grid
"""
if incl_pmag and self.grid_type in self.er_magic.incl_pmag_data:
incl_pmag = True
else:
incl_pmag = False
er_header = self.grid_headers[self.grid_type]['er'][0]
if incl_pmag:
pmag_header = self.grid_headers[self.grid_type]['pmag'][0]
else:
pmag_header = []
# if we need to use '++' to distinguish pmag magic_method_codes from er
if incl_pmag and self.grid_type in ('specimen', 'sample', 'site'):
for double_header in self.er_magic.double:
try:
pmag_header.remove(double_header)
pmag_header.append(double_header + '++')
except ValueError:
pass
header = sorted(list(set(er_header).union(pmag_header)))
first_headers = []
for string in ['citation', '{}_class'.format(self.grid_type),
'{}_lithology'.format(self.grid_type), '{}_type'.format(self.grid_type),
'site_definition']:
for head in header[:]:
if string in head:
header.remove(head)
first_headers.append(head)
# the way we work it, each specimen is assigned to a sample
# each sample is assigned to a site
# specimens can not be added en masse to a site object, for example
# this data will be written in
for string in ['er_specimen_names', 'er_sample_names', 'er_site_names']:
for head in header[:]:
if string in head:
header.remove(head)
# do headers for results type grid
if self.grid_type == 'result':
#header.remove('pmag_result_name')
header[:0] = ['pmag_result_name', 'er_citation_names', 'er_specimen_names',
'er_sample_names', 'er_site_names', 'er_location_names']
elif self.grid_type == 'age':
for header_type in self.er_magic.first_age_headers:
if header_type in header:
header.remove(header_type)
lst = ['er_' + self.grid_type + '_name']
lst.extend(self.er_magic.first_age_headers)
header[:0] = lst
# do headers for all other data types without parents
elif not self.parent_type:
lst = ['er_' + self.grid_type + '_name']
lst.extend(first_headers)
header[:0] = lst
# do headers for all data types with parents
else:
lst = ['er_' + self.grid_type + '_name', 'er_' + self.parent_type + '_name']
lst.extend(first_headers)
header[:0] = lst
grid = magic_grid.MagicGrid(parent=self.panel, name=self.grid_type,
row_labels=[], col_labels=header,
double=self.er_magic.double)
grid.do_event_bindings()
self.grid = grid
return grid
def add_data_to_grid(self, grid, grid_type=None, incl_pmag=True):
incl_parents = True
if not grid_type:
grid_type = self.grid_type
if grid_type == 'age':
grid_type = self.er_magic.age_type
incl_parents = False
incl_pmag = False
# the two loops below may be overly expensive operations
# consider doing this another way
if grid_type == 'sample':
for sample in self.er_magic.samples:
sample.propagate_data()
if grid_type == 'specimen':
for specimen in self.er_magic.specimens:
specimen.propagate_data()
rows = self.er_magic.data_lists[grid_type][0]
grid.add_items(rows, incl_pmag=incl_pmag, incl_parents=incl_parents)
grid.size_grid()
# always start with at least one row:
if not rows:
grid.add_row()
# if adding actual data, remove the blank row
else:
if not grid.GetCellValue(0, 0):
grid.remove_row(0)
def add_age_data_to_grid(self):
dtype = self.er_magic.age_type
row_labels = [self.grid.GetCellValue(row, 0) for row in range(self.grid.GetNumberRows())]
items_list = self.er_magic.data_lists[dtype][0]
items = [self.er_magic.find_by_name(label, items_list) for label in row_labels if label]
col_labels = self.grid.col_labels[1:]
if not any(items):
return
for row_num, item in enumerate(items):
for col_num, label in enumerate(col_labels):
col_num += 1
if item:
if not label in list(item.age_data.keys()):
item.age_data[label] = ''
cell_value = item.age_data[label]
if cell_value:
self.grid.SetCellValue(row_num, col_num, cell_value)
# if no age codes are available, make sure magic_method_codes are set to ''
# otherwise non-age magic_method_codes can fill in here
elif label == 'magic_method_codes':
self.grid.SetCellValue(row_num, col_num, '')
def save_grid_data(self):
"""
Save grid data in the data object
"""
if not self.grid.changes:
print('-I- No changes to save')
return
if self.grid_type == 'age':
age_data_type = self.er_magic.age_type
self.er_magic.write_ages = True
starred_cols = self.grid.remove_starred_labels()
self.grid.SaveEditControlValue() # locks in value in cell currently edited
if self.grid.changes:
num_cols = self.grid.GetNumberCols()
for change in self.grid.changes:
if change == -1:
continue
else:
old_item = self.grid.row_items[change]
new_item_name = self.grid.GetCellValue(change, 0)
new_er_data = {}
new_pmag_data = {}
er_header = self.grid_headers[self.grid_type]['er'][0]
pmag_header = self.grid_headers[self.grid_type]['pmag'][0]
start_num = 2 if self.parent_type else 1
result_data = {}
for col in range(start_num, num_cols):
col_label = str(self.grid.GetColLabelValue(col))
value = str(self.grid.GetCellValue(change, col))
#new_data[col_label] = value
if value == '\t':
value = ''
if '++' in col_label:
col_name = col_label[:-2]
new_pmag_data[col_name] = value
continue
# pmag_* files are new interpretations, so should only have "This study"
# er_* files can have multiple citations
if col_label == 'er_citation_names':
new_pmag_data[col_label] = 'This study'
new_er_data[col_label] = value
continue
if er_header and (col_label in er_header):
new_er_data[col_label] = value
if self.grid_type in ('specimen', 'sample', 'site'):
if pmag_header and (col_label in pmag_header) and (col_label not in self.er_magic.double):
new_pmag_data[col_label] = value
else:
if pmag_header and (col_label in pmag_header):
new_pmag_data[col_label] = value
if col_label in ('er_specimen_names', 'er_sample_names',
'er_site_names', 'er_location_names'):
result_data[col_label] = value
# if there is an item in the data, get its name
if isinstance(old_item, str):
old_item_name = None
else:
old_item_name = self.grid.row_items[change].name
if self.parent_type:
new_parent_name = self.grid.GetCellValue(change, 1)
else:
new_parent_name = ''
# create a new item
if new_item_name and not old_item_name:
print('-I- make new item named', new_item_name)
if self.grid_type == 'result':
specs, samps, sites, locs = self.get_result_children(result_data)
item = self.er_magic.add_result(new_item_name, specs, samps, sites,
locs, new_pmag_data)
else:
item = self.er_magic.add_methods[self.grid_type](new_item_name, new_parent_name,
new_er_data, new_pmag_data)
# update an existing item
elif new_item_name and old_item_name:
print('-I- update existing {} formerly named {} to {}'.format(self.grid_type,
old_item_name,
new_item_name))
if self.grid_type == 'result':
specs, samps, sites, locs = self.get_result_children(result_data)
item = self.er_magic.update_methods['result'](old_item_name, new_item_name,
new_er_data=None,
new_pmag_data=new_pmag_data,
spec_names=specs,
samp_names=samps,
site_names=sites,
loc_names=locs,
replace_data=True)
elif self.grid_type == 'age':
item_type = age_data_type
item = self.er_magic.update_methods['age'](old_item_name, new_er_data,
item_type, replace_data=True)
else:
item = self.er_magic.update_methods[self.grid_type](old_item_name, new_item_name,
new_parent_name, new_er_data,
new_pmag_data, replace_data=True)
def get_result_children(self, result_data):
"""
Take a dict of the form {'er_specimen_names': 'name1:name2:name3'},
and so on for samples, sites, and locations.
Return lists of specimen, sample, site, and location names.
"""
specimens, samples, sites, locations = "", "", "", ""
children = {'specimen': specimens, 'sample': samples,
'site': sites, 'location': locations}
for dtype in children:
header_name = 'er_' + dtype + '_names'
if result_data[header_name]:
children[dtype] = result_data[header_name].split(":")
# make sure there are no extra spaces in names
children[dtype] = [child.strip() for child in children[dtype]]
return children['specimen'], children['sample'], children['site'], children['location']
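# Rough usage sketch (comment added for illustration; all names below are made up):
#   get_result_children({'er_specimen_names': 'spec1 : spec2',
#                        'er_sample_names': '',
#                        'er_site_names': 'site1',
#                        'er_location_names': 'loc1'})
# would return (['spec1', 'spec2'], '', ['site1'], ['loc1']) -- an empty
# entry stays an empty string rather than becoming a list.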
| bsd-3-clause |
cyanna/edx-platform | lms/djangoapps/class_dashboard/tests/test_dashboard_data.py | 13 | 13192 | """
Tests for class dashboard (Metrics tab in instructor dashboard)
"""
import json
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from mock import patch
from capa.tests.response_xml_factory import StringResponseXMLFactory
from courseware.tests.factories import StudentModuleFactory
from student.tests.factories import UserFactory, CourseEnrollmentFactory, AdminFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from class_dashboard.dashboard_data import (
get_problem_grade_distribution, get_sequential_open_distrib,
get_problem_set_grade_distrib, get_d3_problem_grade_distrib,
get_d3_sequential_open_distrib, get_d3_section_grade_distrib,
get_section_display_name, get_array_section_has_problem,
get_students_opened_subsection, get_students_problem_grades,
)
from class_dashboard.views import has_instructor_access_for_class
USER_COUNT = 11
class TestGetProblemGradeDistribution(ModuleStoreTestCase):
"""
Tests related to class_dashboard/dashboard_data.py
"""
def setUp(self):
super(TestGetProblemGradeDistribution, self).setUp()
self.request_factory = RequestFactory()
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password='test')
self.attempts = 3
self.course = CourseFactory.create(
display_name=u"test course omega \u03a9",
)
section = ItemFactory.create(
parent_location=self.course.location,
category="chapter",
display_name=u"test factory section omega \u03a9",
)
self.sub_section = ItemFactory.create(
parent_location=section.location,
category="sequential",
display_name=u"test subsection omega \u03a9",
)
unit = ItemFactory.create(
parent_location=self.sub_section.location,
category="vertical",
metadata={'graded': True, 'format': 'Homework'},
display_name=u"test unit omega \u03a9",
)
self.users = [UserFactory.create(username="metric" + str(__)) for __ in xrange(USER_COUNT)]
for user in self.users:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
for i in xrange(USER_COUNT - 1):
category = "problem"
self.item = ItemFactory.create(
parent_location=unit.location,
category=category,
data=StringResponseXMLFactory().build_xml(answer='foo'),
metadata={'rerandomize': 'always'},
display_name=u"test problem omega \u03a9 " + str(i)
)
for j, user in enumerate(self.users):
StudentModuleFactory.create(
grade=1 if i < j else 0,
max_grade=1 if i < j else 0.5,
student=user,
course_id=self.course.id,
module_state_key=self.item.location,
state=json.dumps({'attempts': self.attempts}),
)
for j, user in enumerate(self.users):
StudentModuleFactory.create(
course_id=self.course.id,
module_type='sequential',
module_state_key=self.item.location,
)
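# Fixture recap (comment added for readability): USER_COUNT students are
# enrolled and USER_COUNT - 1 problems are created; on problem i, student j
# scores 1 only when i < j, so "metric0" gets nothing right and "metric10"
# gets every problem right, which is what the CSV assertions below expect.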
def test_get_problem_grade_distribution(self):
prob_grade_distrib, total_student_count = get_problem_grade_distribution(self.course.id)
for problem in prob_grade_distrib:
max_grade = prob_grade_distrib[problem]['max_grade']
self.assertEquals(1, max_grade)
for val in total_student_count.values():
self.assertEquals(USER_COUNT, val)
def test_get_sequential_open_distribution(self):
sequential_open_distrib = get_sequential_open_distrib(self.course.id)
for problem in sequential_open_distrib:
num_students = sequential_open_distrib[problem]
self.assertEquals(USER_COUNT, num_students)
def test_get_problemset_grade_distrib(self):
prob_grade_distrib, __ = get_problem_grade_distribution(self.course.id)
probset_grade_distrib = get_problem_set_grade_distrib(self.course.id, prob_grade_distrib)
for problem in probset_grade_distrib:
max_grade = probset_grade_distrib[problem]['max_grade']
self.assertEquals(1, max_grade)
grade_distrib = probset_grade_distrib[problem]['grade_distrib']
sum_attempts = 0
for item in grade_distrib:
sum_attempts += item[1]
self.assertEquals(USER_COUNT, sum_attempts)
def test_get_d3_problem_grade_distrib(self):
d3_data = get_d3_problem_grade_distrib(self.course.id)
for data in d3_data:
for stack_data in data['data']:
sum_values = 0
for problem in stack_data['stackData']:
sum_values += problem['value']
self.assertEquals(USER_COUNT, sum_values)
def test_get_d3_sequential_open_distrib(self):
d3_data = get_d3_sequential_open_distrib(self.course.id)
for data in d3_data:
for stack_data in data['data']:
for problem in stack_data['stackData']:
value = problem['value']
self.assertEquals(0, value)
def test_get_d3_section_grade_distrib(self):
d3_data = get_d3_section_grade_distrib(self.course.id, 0)
for stack_data in d3_data:
sum_values = 0
for problem in stack_data['stackData']:
sum_values += problem['value']
self.assertEquals(USER_COUNT, sum_values)
def test_get_students_problem_grades(self):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
response_content = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
self.assertEquals(USER_COUNT, len(response_content))
self.assertEquals(False, response_max_exceeded)
for item in response_content:
if item['grade'] == 0:
self.assertEquals(0, item['percent'])
else:
self.assertEquals(100, item['percent'])
def test_get_students_problem_grades_max(self):
with patch('class_dashboard.dashboard_data.MAX_SCREEN_LIST_LENGTH', 2):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
# Only 2 students in the list and response_max_exceeded is True
self.assertEquals(2, len(response_results))
self.assertEquals(True, response_max_exceeded)
def test_get_students_problem_grades_csv(self):
tooltip = 'P1.2.1 Q1 - 3382 Students (100%: 1/1 questions)'
attributes = '?module_id=' + self.item.location.to_deprecated_string() + '&tooltip=' + tooltip + '&csv=true'
request = self.request_factory.get(reverse('get_students_problem_grades') + attributes)
response = get_students_problem_grades(request)
# Check header and a row for each student in csv response
self.assertContains(response, '"Name","Username","Grade","Percent"')
self.assertContains(response, '"metric0","0.0","0.0"')
self.assertContains(response, '"metric1","0.0","0.0"')
self.assertContains(response, '"metric2","0.0","0.0"')
self.assertContains(response, '"metric3","0.0","0.0"')
self.assertContains(response, '"metric4","0.0","0.0"')
self.assertContains(response, '"metric5","0.0","0.0"')
self.assertContains(response, '"metric6","0.0","0.0"')
self.assertContains(response, '"metric7","0.0","0.0"')
self.assertContains(response, '"metric8","0.0","0.0"')
self.assertContains(response, '"metric9","0.0","0.0"')
self.assertContains(response, '"metric10","1.0","100.0"')
def test_get_students_opened_subsection(self):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
self.assertEquals(USER_COUNT, len(response_results))
self.assertEquals(False, response_max_exceeded)
def test_get_students_opened_subsection_max(self):
with patch('class_dashboard.dashboard_data.MAX_SCREEN_LIST_LENGTH', 2):
attributes = '?module_id=' + self.item.location.to_deprecated_string()
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
response_results = json.loads(response.content)['results']
response_max_exceeded = json.loads(response.content)['max_exceeded']
# Only 2 students in the list and response_max_exceeded is True
self.assertEquals(2, len(response_results))
self.assertEquals(True, response_max_exceeded)
def test_get_students_opened_subsection_csv(self):
tooltip = '4162 students opened Subsection 5: Relational Algebra Exercises'
attributes = '?module_id=' + self.item.location.to_deprecated_string() + '&tooltip=' + tooltip + '&csv=true'
request = self.request_factory.get(reverse('get_students_opened_subsection') + attributes)
response = get_students_opened_subsection(request)
self.assertContains(response, '"Name","Username"')
# Check response contains 1 line for each user +1 for the header
self.assertEquals(USER_COUNT + 1, len(response.content.splitlines()))
def test_post_metrics_data_subsections_csv(self):
url = reverse('post_metrics_data_csv')
sections = json.dumps(["Introduction"])
tooltips = json.dumps([[{"subsection_name": "Pre-Course Survey", "subsection_num": 1, "type": "subsection", "num_students": 18963}]])
course_id = self.course.id
data_type = 'subsection'
data = json.dumps({'sections': sections,
'tooltips': tooltips,
'course_id': course_id.to_deprecated_string(),
'data_type': data_type,
})
response = self.client.post(url, {'data': data})
# Check response contains 1 line for header, 1 line for Section and 1 line for Subsection
self.assertEquals(3, len(response.content.splitlines()))
def test_post_metrics_data_problems_csv(self):
url = reverse('post_metrics_data_csv')
sections = json.dumps(["Introduction"])
tooltips = json.dumps([[[
{'student_count_percent': 0,
'problem_name': 'Q1',
'grade': 0,
'percent': 0,
'label': 'P1.2.1',
'max_grade': 1,
'count_grade': 26,
'type': u'problem'},
{'student_count_percent': 99,
'problem_name': 'Q1',
'grade': 1,
'percent': 100,
'label': 'P1.2.1',
'max_grade': 1,
'count_grade': 4763,
'type': 'problem'},
]]])
course_id = self.course.id
data_type = 'problem'
data = json.dumps({'sections': sections,
'tooltips': tooltips,
'course_id': course_id.to_deprecated_string(),
'data_type': data_type,
})
response = self.client.post(url, {'data': data})
# Check response contains 1 line for header, 1 line for Sections and 2 lines for problems
self.assertEquals(4, len(response.content.splitlines()))
def test_get_section_display_name(self):
section_display_name = get_section_display_name(self.course.id)
self.assertMultiLineEqual(section_display_name[0], u"test factory section omega \u03a9")
def test_get_array_section_has_problem(self):
b_section_has_problem = get_array_section_has_problem(self.course.id)
self.assertEquals(b_section_has_problem[0], True)
def test_has_instructor_access_for_class(self):
"""
Test for instructor access
"""
ret_val = has_instructor_access_for_class(self.instructor, self.course.id)
self.assertEquals(ret_val, True)
| agpl-3.0 |
40223119/2015w13-1 | static/Brython3.1.3-20150514-095342/Lib/fractions.py | 722 | 23203 | # Originally contributed by Sjoerd Mullender.
# Significantly modified by Jeffrey Yasskin <jyasskin at gmail.com>.
"""Fraction, infinite-precision, real numbers."""
from decimal import Decimal
import math
import numbers
import operator
import re
import sys
__all__ = ['Fraction', 'gcd']
def gcd(a, b):
"""Calculate the Greatest Common Divisor of a and b.
Unless b==0, the result will have the same sign as b (so that when
b is divided by it, the result comes out positive).
"""
while b:
a, b = b, a%b
return a
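# Illustrative examples (comments added, not part of the original module):
# gcd(12, 8) == 4, and gcd(4, -6) == -2, so Fraction(4, -6) reduces to
# Fraction(-2, 3) -- the denominator comes out positive, as promised above.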
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo the prime _PyHASH_MODULUS.
_PyHASH_MODULUS = sys.hash_info.modulus
# Value to be used for rationals that reduce to infinity modulo
# _PyHASH_MODULUS.
_PyHASH_INF = sys.hash_info.inf
_RATIONAL_FORMAT = re.compile(r"""
\A\s* # optional whitespace at the start, then
(?P<sign>[-+]?) # an optional sign, then
(?=\d|\.\d) # lookahead for digit or .digit
(?P<num>\d*) # numerator (possibly empty)
(?: # followed by
(?:/(?P<denom>\d+))? # an optional denominator
| # or
(?:\.(?P<decimal>\d*))? # an optional fractional part
(?:E(?P<exp>[-+]?\d+))? # and optional exponent
)
\s*\Z # and optional whitespace to finish
""", re.VERBOSE | re.IGNORECASE)
class Fraction(numbers.Rational):
"""This class implements rational numbers.
In the two-argument form of the constructor, Fraction(8, 6) will
produce a rational number equivalent to 4/3. Both arguments must
be Rational. The numerator defaults to 0 and the denominator
defaults to 1 so that Fraction(3) == 3 and Fraction() == 0.
Fractions can also be constructed from:
- numeric strings similar to those accepted by the
float constructor (for example, '-2.3' or '1e10')
- strings of the form '123/456'
- float and Decimal instances
- other Rational instances (including integers)
"""
__slots__ = ('_numerator', '_denominator')
# We're immutable, so use __new__ not __init__
def __new__(cls, numerator=0, denominator=None):
"""Constructs a Rational.
Takes a string like '3/2' or '1.5', another Rational instance, a
numerator/denominator pair, or a float.
Examples
--------
>>> Fraction(10, -8)
Fraction(-5, 4)
>>> Fraction(Fraction(1, 7), 5)
Fraction(1, 35)
>>> Fraction(Fraction(1, 7), Fraction(2, 3))
Fraction(3, 14)
>>> Fraction('314')
Fraction(314, 1)
>>> Fraction('-35/4')
Fraction(-35, 4)
>>> Fraction('3.1415') # conversion from numeric string
Fraction(6283, 2000)
>>> Fraction('-47e-2') # string may include a decimal exponent
Fraction(-47, 100)
>>> Fraction(1.47) # direct construction from float (exact conversion)
Fraction(6620291452234629, 4503599627370496)
>>> Fraction(2.25)
Fraction(9, 4)
>>> Fraction(Decimal('1.47'))
Fraction(147, 100)
"""
self = super(Fraction, cls).__new__(cls)
if denominator is None:
if isinstance(numerator, numbers.Rational):
self._numerator = numerator.numerator
self._denominator = numerator.denominator
return self
elif isinstance(numerator, float):
# Exact conversion from float
value = Fraction.from_float(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, Decimal):
value = Fraction.from_decimal(numerator)
self._numerator = value._numerator
self._denominator = value._denominator
return self
elif isinstance(numerator, str):
# Handle construction from strings.
m = _RATIONAL_FORMAT.match(numerator)
if m is None:
raise ValueError('Invalid literal for Fraction: %r' %
numerator)
numerator = int(m.group('num') or '0')
denom = m.group('denom')
if denom:
denominator = int(denom)
else:
denominator = 1
decimal = m.group('decimal')
if decimal:
scale = 10**len(decimal)
numerator = numerator * scale + int(decimal)
denominator *= scale
exp = m.group('exp')
if exp:
exp = int(exp)
if exp >= 0:
numerator *= 10**exp
else:
denominator *= 10**-exp
if m.group('sign') == '-':
numerator = -numerator
else:
raise TypeError("argument should be a string "
"or a Rational instance")
elif (isinstance(numerator, numbers.Rational) and
isinstance(denominator, numbers.Rational)):
numerator, denominator = (
numerator.numerator * denominator.denominator,
denominator.numerator * numerator.denominator
)
else:
raise TypeError("both arguments should be "
"Rational instances")
if denominator == 0:
raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
g = gcd(numerator, denominator)
self._numerator = numerator // g
self._denominator = denominator // g
return self
@classmethod
def from_float(cls, f):
"""Converts a finite float to a rational number, exactly.
Beware that Fraction.from_float(0.3) != Fraction(3, 10).
"""
if isinstance(f, numbers.Integral):
return cls(f)
elif not isinstance(f, float):
raise TypeError("%s.from_float() only takes floats, not %r (%s)" %
(cls.__name__, f, type(f).__name__))
if math.isnan(f):
raise ValueError("Cannot convert %r to %s." % (f, cls.__name__))
if math.isinf(f):
raise OverflowError("Cannot convert %r to %s." % (f, cls.__name__))
return cls(*f.as_integer_ratio())
@classmethod
def from_decimal(cls, dec):
"""Converts a finite Decimal instance to a rational number, exactly."""
from decimal import Decimal
if isinstance(dec, numbers.Integral):
dec = Decimal(int(dec))
elif not isinstance(dec, Decimal):
raise TypeError(
"%s.from_decimal() only takes Decimals, not %r (%s)" %
(cls.__name__, dec, type(dec).__name__))
if dec.is_infinite():
raise OverflowError(
"Cannot convert %s to %s." % (dec, cls.__name__))
if dec.is_nan():
raise ValueError("Cannot convert %s to %s." % (dec, cls.__name__))
sign, digits, exp = dec.as_tuple()
digits = int(''.join(map(str, digits)))
if sign:
digits = -digits
if exp >= 0:
return cls(digits * 10 ** exp)
else:
return cls(digits, 10 ** -exp)
def limit_denominator(self, max_denominator=1000000):
"""Closest Fraction to self with denominator at most max_denominator.
>>> Fraction('3.141592653589793').limit_denominator(10)
Fraction(22, 7)
>>> Fraction('3.141592653589793').limit_denominator(100)
Fraction(311, 99)
>>> Fraction(4321, 8765).limit_denominator(10000)
Fraction(4321, 8765)
"""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound---i.e., the floor of self, is
# taken.
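# Rough worked example (comment added for illustration): for
# Fraction('3.141592653589793').limit_denominator(100), the loop yields the
# convergent 22/7 and stops when the next denominator (106) would exceed
# 100; the candidates are then the semiconvergent 311/99 and 22/7, and
# 311/99 is returned because it lies closer to the input (see the docstring).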
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
if self._denominator <= max_denominator:
return Fraction(self)
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = self._numerator, self._denominator
while True:
a = n//d
q2 = q0+a*q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0+a*p1, q2
n, d = d, n-a*d
k = (max_denominator-q0)//q1
bound1 = Fraction(p0+k*p1, q0+k*q1)
bound2 = Fraction(p1, q1)
if abs(bound2 - self) <= abs(bound1-self):
return bound2
else:
return bound1
@property
def numerator(a):
return a._numerator
@property
def denominator(a):
return a._denominator
def __repr__(self):
"""repr(self)"""
return ('Fraction(%s, %s)' % (self._numerator, self._denominator))
def __str__(self):
"""str(self)"""
if self._denominator == 1:
return str(self._numerator)
else:
return '%s/%s' % (self._numerator, self._denominator)
def _operator_fallbacks(monomorphic_operator, fallback_operator):
"""Generates forward and reverse operators given a purely-rational
operator and a function from the operator module.
Use this like:
__op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op)
In general, we want to implement the arithmetic operations so
that mixed-mode operations either call an implementation whose
author knew about the types of both arguments, or convert both
to the nearest built in type and do the operation there. In
Fraction, that means that we define __add__ and __radd__ as:
def __add__(self, other):
# Both types have numerators/denominator attributes,
# so do the operation directly
if isinstance(other, (int, Fraction)):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
# float and complex don't have those operations, but we
# know about those types, so special case them.
elif isinstance(other, float):
return float(self) + other
elif isinstance(other, complex):
return complex(self) + other
# Let the other type take over.
return NotImplemented
def __radd__(self, other):
# radd handles more types than add because there's
# nothing left to fall back to.
if isinstance(other, numbers.Rational):
return Fraction(self.numerator * other.denominator +
other.numerator * self.denominator,
self.denominator * other.denominator)
elif isinstance(other, Real):
return float(other) + float(self)
elif isinstance(other, Complex):
return complex(other) + complex(self)
return NotImplemented
There are 5 different cases for a mixed-type addition on
Fraction. I'll refer to all of the above code that doesn't
refer to Fraction, float, or complex as "boilerplate". 'r'
will be an instance of Fraction, which is a subtype of
Rational (r : Fraction <: Rational), and b : B <:
Complex. The first three involve 'r + b':
1. If B <: Fraction, int, float, or complex, we handle
that specially, and all is well.
2. If Fraction falls back to the boilerplate code, and it
were to return a value from __add__, we'd miss the
possibility that B defines a more intelligent __radd__,
so the boilerplate should return NotImplemented from
__add__. In particular, we don't handle Rational
here, even though we could get an exact answer, in case
the other type wants to do something special.
3. If B <: Fraction, Python tries B.__radd__ before
Fraction.__add__. This is ok, because it was
implemented with knowledge of Fraction, so it can
handle those instances before delegating to Real or
Complex.
The next two situations describe 'b + r'. We assume that b
didn't know about Fraction in its implementation, and that it
uses similar boilerplate code:
4. If B <: Rational, then __radd_ converts both to the
builtin rational type (hey look, that's us) and
proceeds.
5. Otherwise, __radd__ tries to find the nearest common
base ABC, and fall back to its builtin type. Since this
class doesn't subclass a concrete type, there's no
implementation to fall back to, so we need to try as
hard as possible to return an actual value, or the user
will get a TypeError.
"""
def forward(a, b):
if isinstance(b, (int, Fraction)):
return monomorphic_operator(a, b)
elif isinstance(b, float):
return fallback_operator(float(a), b)
elif isinstance(b, complex):
return fallback_operator(complex(a), b)
else:
return NotImplemented
forward.__name__ = '__' + fallback_operator.__name__ + '__'
forward.__doc__ = monomorphic_operator.__doc__
def reverse(b, a):
if isinstance(a, numbers.Rational):
# Includes ints.
return monomorphic_operator(a, b)
elif isinstance(a, numbers.Real):
return fallback_operator(float(a), float(b))
elif isinstance(a, numbers.Complex):
return fallback_operator(complex(a), complex(b))
else:
return NotImplemented
reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
reverse.__doc__ = monomorphic_operator.__doc__
return forward, reverse
def _add(a, b):
"""a + b"""
return Fraction(a.numerator * b.denominator +
b.numerator * a.denominator,
a.denominator * b.denominator)
__add__, __radd__ = _operator_fallbacks(_add, operator.add)
def _sub(a, b):
"""a - b"""
return Fraction(a.numerator * b.denominator -
b.numerator * a.denominator,
a.denominator * b.denominator)
__sub__, __rsub__ = _operator_fallbacks(_sub, operator.sub)
def _mul(a, b):
"""a * b"""
return Fraction(a.numerator * b.numerator, a.denominator * b.denominator)
__mul__, __rmul__ = _operator_fallbacks(_mul, operator.mul)
def _div(a, b):
"""a / b"""
return Fraction(a.numerator * b.denominator,
a.denominator * b.numerator)
__truediv__, __rtruediv__ = _operator_fallbacks(_div, operator.truediv)
def __floordiv__(a, b):
"""a // b"""
return math.floor(a / b)
def __rfloordiv__(b, a):
"""a // b"""
return math.floor(a / b)
def __mod__(a, b):
"""a % b"""
div = a // b
return a - b * div
def __rmod__(b, a):
"""a % b"""
div = a // b
return a - b * div
def __pow__(a, b):
"""a ** b
If b is not an integer, the result will be a float or complex
since roots are generally irrational. If b is an integer, the
result will be rational.
"""
if isinstance(b, numbers.Rational):
if b.denominator == 1:
power = b.numerator
if power >= 0:
return Fraction(a._numerator ** power,
a._denominator ** power)
else:
return Fraction(a._denominator ** -power,
a._numerator ** -power)
else:
# A fractional power will generally produce an
# irrational number.
return float(a) ** float(b)
else:
return float(a) ** b
def __rpow__(b, a):
"""a ** b"""
if b._denominator == 1 and b._numerator >= 0:
# If a is an int, keep it that way if possible.
return a ** b._numerator
if isinstance(a, numbers.Rational):
return Fraction(a.numerator, a.denominator) ** b
if b._denominator == 1:
return a ** b._numerator
return a ** float(b)
def __pos__(a):
"""+a: Coerces a subclass instance to Fraction"""
return Fraction(a._numerator, a._denominator)
def __neg__(a):
"""-a"""
return Fraction(-a._numerator, a._denominator)
def __abs__(a):
"""abs(a)"""
return Fraction(abs(a._numerator), a._denominator)
def __trunc__(a):
"""trunc(a)"""
if a._numerator < 0:
return -(-a._numerator // a._denominator)
else:
return a._numerator // a._denominator
def __floor__(a):
"""Will be math.floor(a) in 3.0."""
return a.numerator // a.denominator
def __ceil__(a):
"""Will be math.ceil(a) in 3.0."""
# The negations cleverly convince floordiv to return the ceiling.
return -(-a.numerator // a.denominator)
def __round__(self, ndigits=None):
"""Will be round(self, ndigits) in 3.0.
Rounds half toward even.
"""
if ndigits is None:
floor, remainder = divmod(self.numerator, self.denominator)
if remainder * 2 < self.denominator:
return floor
elif remainder * 2 > self.denominator:
return floor + 1
# Deal with the half case:
elif floor % 2 == 0:
return floor
else:
return floor + 1
shift = 10**abs(ndigits)
# See _operator_fallbacks.forward to check that the results of
# these operations will always be Fraction and therefore have
# round().
if ndigits > 0:
return Fraction(round(self * shift), shift)
else:
return Fraction(round(self / shift) * shift)
def __hash__(self):
"""hash(self)"""
# XXX since this method is expensive, consider caching the result
# In order to make sure that the hash of a Fraction agrees
# with the hash of a numerically equal integer, float or
# Decimal instance, we follow the rules for numeric hashes
# outlined in the documentation. (See library docs, 'Built-in
# Types').
# dinv is the inverse of self._denominator modulo the prime
# _PyHASH_MODULUS, or 0 if self._denominator is divisible by
# _PyHASH_MODULUS.
dinv = pow(self._denominator, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
if not dinv:
hash_ = _PyHASH_INF
else:
hash_ = abs(self._numerator) * dinv % _PyHASH_MODULUS
result = hash_ if self >= 0 else -hash_
return -2 if result == -1 else result
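    # Illustrative note (editor's addition): these rules are what make, for
    # example, hash(Fraction(1, 2)) == hash(0.5) and hash(Fraction(3)) == hash(3).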
def __eq__(a, b):
"""a == b"""
if isinstance(b, numbers.Rational):
return (a._numerator == b.numerator and
a._denominator == b.denominator)
if isinstance(b, numbers.Complex) and b.imag == 0:
b = b.real
if isinstance(b, float):
if math.isnan(b) or math.isinf(b):
# comparisons with an infinity or nan should behave in
# the same way for any finite a, so treat a as zero.
return 0.0 == b
else:
return a == a.from_float(b)
else:
# Since a doesn't know how to compare with b, let's give b
# a chance to compare itself with a.
return NotImplemented
def _richcmp(self, other, op):
"""Helper for comparison operators, for internal use only.
Implement comparison between a Rational instance `self`, and
either another Rational instance or a float `other`. If
`other` is not a Rational instance or a float, return
NotImplemented. `op` should be one of the six standard
comparison operators.
"""
# convert other to a Rational instance where reasonable.
if isinstance(other, numbers.Rational):
return op(self._numerator * other.denominator,
self._denominator * other.numerator)
if isinstance(other, float):
if math.isnan(other) or math.isinf(other):
return op(0.0, other)
else:
return op(self, self.from_float(other))
else:
return NotImplemented
def __lt__(a, b):
"""a < b"""
return a._richcmp(b, operator.lt)
def __gt__(a, b):
"""a > b"""
return a._richcmp(b, operator.gt)
def __le__(a, b):
"""a <= b"""
return a._richcmp(b, operator.le)
def __ge__(a, b):
"""a >= b"""
return a._richcmp(b, operator.ge)
def __bool__(a):
"""a != 0"""
return a._numerator != 0
# support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) == Fraction:
return self # I'm immutable; therefore I am my own clone
return self.__class__(self._numerator, self._denominator)
def __deepcopy__(self, memo):
if type(self) == Fraction:
return self # My components are also immutable
return self.__class__(self._numerator, self._denominator)
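# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The forward/reverse pairs produced by _operator_fallbacks dispatch on the
# other operand's type: exact arithmetic for int/Fraction operands, a float
# fallback for float, a complex fallback for complex, NotImplemented otherwise.
if __name__ == '__main__':
    assert Fraction(1, 3) + 1 == Fraction(4, 3)      # int operand stays exact
    assert Fraction(1, 4) + 0.25 == 0.5              # float operand -> float
    assert Fraction(1, 2) + 1j == 0.5 + 1j           # complex operand -> complex
    assert 2 ** Fraction(3) == 8                     # __rpow__ keeps ints exact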
| gpl-3.0 |
sgallagher/anaconda | pyanaconda/core/constants.py | 1 | 15488 | #
# constants.py: anaconda constants
#
# Copyright (C) 2001 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Used for digits, ascii_letters, punctuation constants
import string # pylint: disable=deprecated-module
from pyanaconda.core.i18n import N_
from enum import Enum
# Use -1 to indicate that the selinux configuration is unset
SELINUX_DEFAULT = -1
# where to look for 3rd party addons
ADDON_PATHS = ["/usr/share/anaconda/addons"]
# common string needs to be easy to change
from pyanaconda import product
productName = product.productName
productVersion = product.productVersion
productArch = product.productArch
isFinal = product.isFinal
# for use in device names, eg: "fedora", "rhel"
shortProductName = productName.lower() # pylint: disable=no-member
if productName.count(" "): # pylint: disable=no-member
shortProductName = ''.join(s[0] for s in shortProductName.split())
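# Illustrative note (editor's addition): a multi-word productName is reduced to
# its initials ("Red Hat Enterprise Linux" -> "rhel"), while a single-word name
# such as "Fedora" is simply lower-cased to "fedora".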
TRANSLATIONS_UPDATE_DIR = "/tmp/updates/po"
# The default virtio port.
VIRTIO_PORT = "/dev/virtio-ports/org.fedoraproject.anaconda.log.0"
ANACONDA_CLEANUP = "anaconda-cleanup"
MOUNT_DIR = "/run/install"
DRACUT_REPODIR = "/run/install/repo"
DRACUT_ISODIR = "/run/install/source"
ISO_DIR = MOUNT_DIR + "/isodir"
IMAGE_DIR = MOUNT_DIR + "/image"
INSTALL_TREE = MOUNT_DIR + "/source"
SOURCES_DIR = MOUNT_DIR + "/sources"
BASE_REPO_NAME = "anaconda"
# Get list of repo names which should be used as base repo
DEFAULT_REPOS = [productName.split('-')[0].lower(),
"fedora-modular-server",
"rawhide",
"BaseOS"]
# Get list of repo names which should be used as updates repos
DEFAULT_UPDATE_REPOS = ["updates",
"updates-modular"]
DBUS_ANACONDA_SESSION_ADDRESS = "DBUS_ANACONDA_SESSION_BUS_ADDRESS"
ANACONDA_BUS_CONF_FILE = "/usr/share/anaconda/dbus/anaconda-bus.conf"
ANACONDA_BUS_ADDR_FILE = "/run/anaconda/bus.address"
ANACONDA_DATA_DIR = "/usr/share/anaconda"
ANACONDA_CONFIG_DIR = "/etc/anaconda/"
ANACONDA_CONFIG_TMP = "/run/anaconda/anaconda.conf"
# NOTE: this should be LANG_TERRITORY.CODESET, e.g. en_US.UTF-8
DEFAULT_LANG = "en_US.UTF-8"
DEFAULT_VC_FONT = "eurlatgr"
DEFAULT_KEYBOARD = "us"
DRACUT_SHUTDOWN_EJECT = "/run/initramfs/usr/lib/dracut/hooks/shutdown/99anaconda-eject.sh"
# Help.
HELP_MAIN_PAGE_GUI = "Installation_Guide.xml"
HELP_MAIN_PAGE_TUI = "Installation_Guide.txt"
# VNC questions
USEVNC = N_("Start VNC")
USETEXT = N_("Use text mode")
# Quit message
QUIT_MESSAGE = N_("Do you really want to quit?")
# Runlevel files
TEXT_ONLY_TARGET = 'multi-user.target'
GRAPHICAL_TARGET = 'graphical.target'
# Network
# The requests package (where this constant is used) recommends a timeout slightly
# above a multiple of 3, because that is the default packet re-transmission window.
# See: https://3.python-requests.org/user/advanced/#timeouts
NETWORK_CONNECTION_TIMEOUT = 46 # in seconds
NETWORK_CONNECTED_CHECK_INTERVAL = 0.1 # in seconds
# DBus
DEFAULT_DBUS_TIMEOUT = -1 # use default
# Thread names
THREAD_EXECUTE_STORAGE = "AnaExecuteStorageThread"
THREAD_STORAGE = "AnaStorageThread"
THREAD_STORAGE_WATCHER = "AnaStorageWatcher"
THREAD_WAIT_FOR_CONNECTING_NM = "AnaWaitForConnectingNMThread"
THREAD_PAYLOAD = "AnaPayloadThread"
THREAD_PAYLOAD_RESTART = "AnaPayloadRestartThread"
THREAD_EXCEPTION_HANDLING_TEST = "AnaExceptionHandlingTest"
THREAD_LIVE_PROGRESS = "AnaLiveProgressThread"
THREAD_SOFTWARE_WATCHER = "AnaSoftwareWatcher"
THREAD_CHECK_SOFTWARE = "AnaCheckSoftwareThread"
THREAD_SOURCE_WATCHER = "AnaSourceWatcher"
THREAD_INSTALL = "AnaInstallThread"
THREAD_GEOLOCATION_REFRESH = "AnaGeolocationRefreshThread"
THREAD_DATE_TIME = "AnaDateTimeThread"
THREAD_TIME_INIT = "AnaTimeInitThread"
THREAD_DASDFMT = "AnaDasdfmtThread"
THREAD_KEYBOARD_INIT = "AnaKeyboardThread"
THREAD_ADD_LAYOUTS_INIT = "AnaAddLayoutsInitThread"
THREAD_NTP_SERVER_CHECK = "AnaNTPserver"
THREAD_DBUS_TASK = "AnaTaskThread"
THREAD_SUBSCRIPTION = "AnaSubscriptionThread"
# Geolocation constants
# geolocation providers
# - values are used by the geoloc CLI/boot option
GEOLOC_PROVIDER_FEDORA_GEOIP = "provider_fedora_geoip"
GEOLOC_PROVIDER_HOSTIP = "provider_hostip"
GEOLOC_PROVIDER_GOOGLE_WIFI = "provider_google_wifi"
# geocoding provider
GEOLOC_GEOCODER_NOMINATIM = "geocoder_nominatim"
# default providers
GEOLOC_DEFAULT_PROVIDER = GEOLOC_PROVIDER_FEDORA_GEOIP
GEOLOC_DEFAULT_GEOCODER = GEOLOC_GEOCODER_NOMINATIM
# timeout (in seconds)
GEOLOC_TIMEOUT = 3
ANACONDA_ENVIRON = "anaconda"
FIRSTBOOT_ENVIRON = "firstboot"
# Tainted hardware
TAINT_SUPPORT_REMOVED = 27
TAINT_HARDWARE_UNSUPPORTED = 28
WARNING_SUPPORT_REMOVED = N_(
"Support for this hardware has been removed in this major OS release. Please check the"
"removed functionality section of the release notes."
)
WARNING_HARDWARE_UNSUPPORTED = N_(
"This hardware (or a combination thereof) is not supported by Red Hat. For more information "
"on supported hardware, please refer to http://www.redhat.com/hardware."
)
# Storage messages
WARNING_NO_DISKS_DETECTED = N_(
"No disks detected. Please shut down the computer, connect at least one disk, and restart "
"to complete installation."
)
WARNING_NO_DISKS_SELECTED = N_(
"No disks selected; please select at least one disk to install to."
)
# Kernel messages.
WARNING_SMT_ENABLED_GUI = N_(
"Simultaneous Multithreading (SMT) technology can provide performance "
"improvements for certain workloads, but introduces several publicly "
"disclosed security issues. You have the option of disabling SMT, which "
"may impact performance. If you choose to leave SMT enabled, please read "
"https://red.ht/rhel-smt to understand your potential risks and learn "
"about other ways to mitigate these risks."
)
# This message is shorter to fit on the screen.
WARNING_SMT_ENABLED_TUI = N_(
"Simultaneous Multithreading (SMT) may improve performance for certain "
"workloads, but introduces several publicly disclosed security issues. "
"You can disable SMT, which may impact performance. Please read "
"https://red.ht/rhel-smt to understand potential risks and learn about "
"ways to mitigate these risks."
)
# Password type
class SecretType(Enum):
PASSWORD = "password"
PASSPHRASE = "passphrase"
# Password validation
SECRET_EMPTY_ERROR = {
SecretType.PASSWORD : N_("The password is empty."),
SecretType.PASSPHRASE : N_("The passphrase is empty.")
}
SECRET_CONFIRM_ERROR_GUI = {
SecretType.PASSWORD : N_("The passwords do not match."),
SecretType.PASSPHRASE : N_("The passphrases do not match.")
}
SECRET_CONFIRM_ERROR_TUI = {
SecretType.PASSWORD : N_("The passwords you entered were different. Please try again."),
SecretType.PASSPHRASE : N_("The passphrases you entered were different. Please try again.")
}
# The secret-too-short constant is used to replace a libpwquality error message,
# which is why it does not end with a ".", like all the others do.
SECRET_TOO_SHORT = {
SecretType.PASSWORD : N_("The password is too short"),
SecretType.PASSPHRASE : N_("The passphrase is too short")
}
SECRET_WEAK = {
SecretType.PASSWORD : N_("The password you have provided is weak."),
SecretType.PASSPHRASE : N_("The passphrase you have provided is weak.")
}
SECRET_WEAK_WITH_ERROR = {
SecretType.PASSWORD : N_("The password you have provided is weak:"),
SecretType.PASSPHRASE : N_("The passphrase you have provided is weak:")
}
PASSWORD_FINAL_CONFIRM = N_("Press <b>Done</b> again to use the password anyway.")
SECRET_ASCII = {
SecretType.PASSWORD : N_("The password you have provided contains non-ASCII characters. You may not be able to switch between keyboard layouts when typing it."),
SecretType.PASSPHRASE : N_("The passphrase you have provided contains non-ASCII characters. You may not be able to switch between keyboard layouts when typing it.")
}
PASSWORD_DONE_TWICE = N_("You will have to press <b>Done</b> twice to confirm it.")
PASSWORD_SET = N_("Password set.")
class SecretStatus(Enum):
EMPTY = N_("Empty")
TOO_SHORT = N_("Too short")
WEAK = N_("Weak")
FAIR = N_("Fair")
GOOD = N_("Good")
STRONG = N_("Strong")
PASSWORD_HIDE = N_("Hide password.")
PASSWORD_SHOW = N_("Show password.")
PASSWORD_HIDE_ICON = "anaconda-password-show-off"
PASSWORD_SHOW_ICON = "anaconda-password-show-on"
# the number of seconds we consider a noticeable freeze of the UI
NOTICEABLE_FREEZE = 0.1
# all ASCII characters
PW_ASCII_CHARS = string.digits + string.ascii_letters + string.punctuation + " "
# Recognizing a tarfile
TAR_SUFFIX = (".tar", ".tbz", ".tgz", ".txz", ".tar.bz2", "tar.gz", "tar.xz")
# screenshots
SCREENSHOTS_DIRECTORY = "/tmp/anaconda-screenshots"
SCREENSHOTS_TARGET_DIRECTORY = "/root/anaconda-screenshots"
CMDLINE_FILES = [
"/proc/cmdline",
"/run/install/cmdline",
"/run/install/cmdline.d/*.conf",
"/etc/cmdline"
]
# cmdline arguments that append instead of overwrite
CMDLINE_APPEND = ["modprobe.blacklist", "ifname", "ip"]
CMDLINE_LIST = ["addrepo"]
# Filesystems which are not supported by Anaconda
UNSUPPORTED_FILESYSTEMS = ("btrfs", "ntfs", "tmpfs")
# Default to these units when reading user input when no units given
SIZE_UNITS_DEFAULT = "MiB"
# An estimated ratio for metadata size to total disk space.
STORAGE_METADATA_RATIO = 0.1
# Constants for reporting status to IPMI. These are from the IPMI spec v2 rev1.1, page 512.
IPMI_STARTED = 0x7 # installation started
IPMI_FINISHED = 0x8 # installation finished successfully
IPMI_ABORTED = 0x9 # installation finished unsuccessfully, due to some non-exn error
IPMI_FAILED = 0xA # installation hit an exception
# X display number to use
X_DISPLAY_NUMBER = 1
# Payload status messages
PAYLOAD_STATUS_PROBING_STORAGE = N_("Probing storage...")
PAYLOAD_STATUS_TESTING_AVAILABILITY = N_("Testing availability...")
PAYLOAD_STATUS_PACKAGE_MD = N_("Downloading package metadata...")
PAYLOAD_STATUS_GROUP_MD = N_("Downloading group metadata...")
# Window title text
WINDOW_TITLE_TEXT = N_("Anaconda Installer")
# Types of time sources.
TIME_SOURCE_SERVER = "SERVER"
TIME_SOURCE_POOL = "POOL"
# NTP server checking
NTP_SERVER_OK = 0
NTP_SERVER_NOK = 1
NTP_SERVER_QUERY = 2
# Timeout for the NTP server check
NTP_SERVER_TIMEOUT = 5
# Storage checker constraints
STORAGE_MIN_RAM = "min_ram"
STORAGE_ROOT_DEVICE_TYPES = "root_device_types"
STORAGE_MIN_PARTITION_SIZES = "min_partition_sizes"
STORAGE_REQ_PARTITION_SIZES = "req_partition_sizes"
STORAGE_MUST_BE_ON_LINUXFS = "must_be_on_linuxfs"
STORAGE_MUST_BE_ON_ROOT = "must_be_on_root"
STORAGE_MUST_NOT_BE_ON_ROOT = "must_not_be_on_root"
STORAGE_REFORMAT_ALLOWLIST = "reformat_allowlist"
STORAGE_REFORMAT_BLOCKLIST = "reformat_blocklist"
STORAGE_SWAP_IS_RECOMMENDED = "swap_is_recommended"
STORAGE_LUKS2_MIN_RAM = "luks2_min_ram"
# Display modes
class DisplayModes(Enum):
GUI = "GUI"
TUI = "TUI"
DISPLAY_MODE_NAME = {
DisplayModes.GUI: "graphical mode",
DisplayModes.TUI: "text mode"
}
INTERACTIVE_MODE_NAME = {
True: "interactive",
False: "noninteractive"
}
# Loggers
LOGGER_ANACONDA_ROOT = "anaconda"
LOGGER_MAIN = "anaconda.main"
LOGGER_STDOUT = "anaconda.stdout"
LOGGER_PROGRAM = "program"
LOGGER_STORAGE = "storage"
LOGGER_PACKAGING = "packaging"
LOGGER_DNF = "dnf"
LOGGER_BLIVET = "blivet"
LOGGER_SIMPLELINE = "simpleline"
LOGGER_SENSITIVE_INFO = "sensitive_info"
class PayloadRequirementType(Enum):
package = "package"
group = "group"
# Timeout for starting X
X_TIMEOUT = 60
# Setup on boot actions.
SETUP_ON_BOOT_DEFAULT = -1
SETUP_ON_BOOT_DISABLED = 0
SETUP_ON_BOOT_ENABLED = 1
SETUP_ON_BOOT_RECONFIG = 2
# Clear partitions modes.
CLEAR_PARTITIONS_DEFAULT = -1
CLEAR_PARTITIONS_NONE = 0
CLEAR_PARTITIONS_ALL = 1
CLEAR_PARTITIONS_LIST = 2
CLEAR_PARTITIONS_LINUX = 3
# Bootloader modes.
BOOTLOADER_DISABLED = 0
BOOTLOADER_ENABLED = 1
BOOTLOADER_SKIPPED = 2
# Bootloader locations.
BOOTLOADER_LOCATION_DEFAULT = "DEFAULT"
BOOTLOADER_LOCATION_PARTITION = "PARTITION"
BOOTLOADER_LOCATION_MBR = "MBR"
# Bootloader timeout.
BOOTLOADER_TIMEOUT_UNSET = -1
# Bootloader drive.
BOOTLOADER_DRIVE_UNSET = ""
# Firewall mode.
FIREWALL_DEFAULT = -1
FIREWALL_DISABLED = 0
FIREWALL_ENABLED = 1
FIREWALL_USE_SYSTEM_DEFAULTS = 2
# Iscsi interface mode.
ISCSI_INTERFACE_UNSET = "none"
ISCSI_INTERFACE_DEFAULT = "default"
ISCSI_INTERFACE_IFACENAME = "bind"
# Partitioning methods.
PARTITIONING_METHOD_AUTOMATIC = "AUTOMATIC"
PARTITIONING_METHOD_CUSTOM = "CUSTOM"
PARTITIONING_METHOD_MANUAL = "MANUAL"
PARTITIONING_METHOD_INTERACTIVE = "INTERACTIVE"
PARTITIONING_METHOD_BLIVET = "BLIVET"
# Types of secret data.
SECRET_TYPE_NONE = "NONE"
SECRET_TYPE_HIDDEN = "HIDDEN"
SECRET_TYPE_TEXT = "TEXT"
# Types of the payload.
PAYLOAD_TYPE_DNF = "DNF"
PAYLOAD_TYPE_LIVE_OS = "LIVE_OS"
PAYLOAD_TYPE_LIVE_IMAGE = "LIVE_IMAGE"
PAYLOAD_TYPE_RPM_OSTREE = "RPM_OSTREE"
# All live types of the payload.
PAYLOAD_LIVE_TYPES = (
PAYLOAD_TYPE_LIVE_OS,
PAYLOAD_TYPE_LIVE_IMAGE
)
# Types of the payload source.
SOURCE_TYPE_LIVE_OS_IMAGE = "LIVE_OS_IMAGE"
SOURCE_TYPE_HMC = "HMC"
SOURCE_TYPE_CDROM = "CDROM"
SOURCE_TYPE_CLOSEST_MIRROR = "CLOSEST_MIRROR"
SOURCE_TYPE_REPO_FILES = "REPO_FILES"
SOURCE_TYPE_NFS = "NFS"
SOURCE_TYPE_URL = "URL"
SOURCE_TYPE_HDD = "HDD"
SOURCE_TYPE_CDN = "CDN"
# All types that use repo files.
SOURCE_REPO_FILE_TYPES = (
SOURCE_TYPE_REPO_FILES,
SOURCE_TYPE_CLOSEST_MIRROR,
SOURCE_TYPE_CDN,
)
# Payload URL source types.
URL_TYPE_BASEURL = "BASEURL"
URL_TYPE_MIRRORLIST = "MIRRORLIST"
URL_TYPE_METALINK = "METALINK"
# The default source for the DNF payload.
DNF_DEFAULT_SOURCE_TYPE = SOURCE_TYPE_CLOSEST_MIRROR
# Default values of DNF repository configuration
DNF_DEFAULT_REPO_COST = 1000
# Subscription request types
#
# Subscription request can currently be one of two types:
# - using username and password for authentication
# - using organization id and one or more authentication keys
# for authentication
SUBSCRIPTION_REQUEST_TYPE_USERNAME_PASSWORD = "username_password"
SUBSCRIPTION_REQUEST_TYPE_ORG_KEY = "org_activation_key"
SUBSCRIPTION_REQUEST_VALID_TYPES = {
SUBSCRIPTION_REQUEST_TYPE_USERNAME_PASSWORD,
SUBSCRIPTION_REQUEST_TYPE_ORG_KEY,
}
# Default authentication for subscription requests is
# username password - this is basically to avoid the invalid
# case of request not having a type set.
DEFAULT_SUBSCRIPTION_REQUEST_TYPE = SUBSCRIPTION_REQUEST_TYPE_USERNAME_PASSWORD
# How long to wait for the RHSM service to become available after it is started.
# - in seconds
# - based on the default 90 second systemd service activation timeout
RHSM_SERVICE_TIMEOUT = 90.0
# Path to the System Purpose configuration file on a system.
RHSM_SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json"
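# --- Illustrative sketch (editor's addition, not part of Anaconda) ---
# Every SECRET_* dictionary above is keyed by SecretType, so UI code can pick
# password vs. passphrase wording with a single lookup. The helper below is
# hypothetical and only demonstrates that pattern.
def _empty_secret_message(secret_type=SecretType.PASSWORD):
    return SECRET_EMPTY_ERROR[secret_type]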
| gpl-2.0 |
drnextgis/QGIS | python/plugins/processing/algs/taudem/dinftranslimaccum2.py | 5 | 5539 | # -*- coding: utf-8 -*-
"""
***************************************************************************
dinftranslimaccum2.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from future import standard_library
standard_library.install_aliases()
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputRaster
from processing.tools import dataobjects
from .TauDEMUtils import TauDEMUtils
class DinfTransLimAccum2(GeoAlgorithm):
DINF_FLOW_DIR_GRID = 'DINF_FLOW_DIR_GRID'
SUPPLY_GRID = 'SUPPLY_GRID'
CAPACITY_GRID = 'CAPACITY_GRID'
IN_CONCENTR_GRID = 'IN_CONCENTR_GRID'
OUTLETS_SHAPE = 'OUTLETS_SHAPE'
EDGE_CONTAM = 'EDGE_CONTAM'
TRANSP_LIM_ACCUM_GRID = 'TRANSP_LIM_ACCUM_GRID'
DEPOSITION_GRID = 'DEPOSITION_GRID'
OUT_CONCENTR_GRID = 'OUT_CONCENTR_GRID'
def getIcon(self):
return QIcon(os.path.dirname(__file__) + '/../../images/taudem.svg')
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('D-Infinity Transport Limited Accumulation - 2')
self.cmdName = 'dinftranslimaccum'
self.group, self.i18n_group = self.trAlgorithm('Specialized Grid Analysis tools')
self.addParameter(ParameterRaster(self.DINF_FLOW_DIR_GRID,
self.tr('D-Infinity Flow Direction Grid'), False))
self.addParameter(ParameterRaster(self.SUPPLY_GRID,
self.tr('Supply Grid'), False))
self.addParameter(ParameterRaster(self.CAPACITY_GRID,
self.tr('Transport Capacity Grid'), False))
self.addParameter(ParameterRaster(self.IN_CONCENTR_GRID,
self.tr('Input Concentration Grid'), False))
self.addParameter(ParameterVector(self.OUTLETS_SHAPE,
self.tr('Outlets Shapefile'),
[dataobjects.TYPE_VECTOR_POINT], True))
self.addParameter(ParameterBoolean(self.EDGE_CONTAM,
self.tr('Check for edge contamination'), True))
self.addOutput(OutputRaster(self.TRANSP_LIM_ACCUM_GRID,
self.tr('Transport Limited Accumulation Grid')))
self.addOutput(OutputRaster(self.DEPOSITION_GRID,
self.tr('Deposition Grid')))
self.addOutput(OutputRaster(self.OUT_CONCENTR_GRID,
self.tr('Output Concentration Grid')))
def processAlgorithm(self, progress):
commands = []
commands.append(os.path.join(TauDEMUtils.mpiexecPath(), 'mpiexec'))
processNum = ProcessingConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
if processNum <= 0:
raise GeoAlgorithmExecutionException(
self.tr('Wrong number of MPI processes used. Please set '
'correct number before running TauDEM algorithms.'))
commands.append('-n')
commands.append(str(processNum))
commands.append(os.path.join(TauDEMUtils.taudemPath(), self.cmdName))
commands.append('-ang')
commands.append(self.getParameterValue(self.DINF_FLOW_DIR_GRID))
commands.append('-tsup')
commands.append(self.getParameterValue(self.SUPPLY_GRID))
commands.append('-tc')
commands.append(self.getParameterValue(self.CAPACITY_GRID))
commands.append('-cs')
commands.append(self.getParameterValue(self.IN_CONCENTR_GRID))
param = self.getParameterValue(self.OUTLETS_SHAPE)
if param is not None:
commands.append('-o')
commands.append(param)
if not self.getParameterValue(self.EDGE_CONTAM):
commands.append('-nc')
commands.append('-tla')
commands.append(self.getOutputValue(self.TRANSP_LIM_ACCUM_GRID))
commands.append('-tdep')
commands.append(self.getOutputValue(self.DEPOSITION_GRID))
commands.append('-ctpt')
commands.append(self.getOutputValue(self.OUT_CONCENTR_GRID))
TauDEMUtils.executeTauDEM(commands, progress)
| gpl-2.0 |
hmen89/odoo | openerp/addons/base/module/wizard/base_module_configuration.py | 447 | 2274 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class base_module_configuration(osv.osv_memory):
_name = "base.module.configuration"
def start(self, cr, uid, ids, context=None):
todo_ids = self.pool.get('ir.actions.todo').search(cr, uid,
['|', ('type','=','recurring'), ('state', '=', 'open')])
if not todo_ids:
            # When there is no open or recurring wizard todo left, display an informational message
data_obj = self.pool.get('ir.model.data')
result = data_obj._get_id(cr, uid, 'base', 'view_base_module_configuration_form')
view_id = data_obj.browse(cr, uid, result).res_id
value = {
'name': _('System Configuration done'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.module.configuration',
'view_id': [view_id],
'type': 'ir.actions.act_window',
'target': 'new'
}
return value
# Run the config wizards
config_pool = self.pool.get('res.config')
return config_pool.start(cr, uid, ids, context=context)
base_module_configuration()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dhanunjaya/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_phys.py | 19 | 2828 | # Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import br_dvr_process
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ovs_bridge
class OVSPhysicalBridge(ovs_bridge.OVSAgentBridge,
br_dvr_process.OVSDVRProcessMixin):
"""openvswitch agent physical bridge specific logic."""
# Used by OVSDVRProcessMixin
dvr_process_table_id = constants.DVR_PROCESS_VLAN
dvr_process_next_table_id = constants.LOCAL_VLAN_TRANSLATION
def setup_default_table(self):
self.delete_flows()
self.install_normal()
@staticmethod
def _local_vlan_match(ofp, ofpp, port, lvid):
return ofpp.OFPMatch(in_port=port, vlan_vid=lvid | ofp.OFPVID_PRESENT)
def provision_local_vlan(self, port, lvid, segmentation_id, distributed):
table_id = constants.LOCAL_VLAN_TRANSLATION if distributed else 0
(_dp, ofp, ofpp) = self._get_dp()
match = self._local_vlan_match(ofp, ofpp, port, lvid)
if segmentation_id is None:
actions = [ofpp.OFPActionPopVlan()]
else:
vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
actions = [ofpp.OFPActionSetField(vlan_vid=vlan_vid)]
actions += [ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0)]
self.install_apply_actions(table_id=table_id,
priority=4,
match=match,
actions=actions)
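    # Illustrative note (editor's addition): the flow installed above rewrites
    # the locally-significant VLAN tag (lvid) to the provider network's
    # segmentation_id (or pops the tag for flat networks) and then hands the
    # frame to the NORMAL pipeline; reclaim_local_vlan() below removes it.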
def reclaim_local_vlan(self, port, lvid):
(_dp, ofp, ofpp) = self._get_dp()
match = self._local_vlan_match(ofp, ofpp, port, lvid)
self.delete_flows(match=match)
def add_dvr_mac_vlan(self, mac, port):
self.install_output(table_id=constants.DVR_NOT_LEARN_VLAN,
priority=2, eth_src=mac, port=port)
def remove_dvr_mac_vlan(self, mac):
# REVISIT(yamamoto): match in_port as well?
self.delete_flows(table_id=constants.DVR_NOT_LEARN_VLAN,
eth_src=mac)
| apache-2.0 |
wakashige/bazel | third_party/py/gflags/gflags_validators.py | 488 | 6977 | #!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in gflags.py's docstring for a usage manual.
"""
__author__ = '[email protected] (Olexiy Oryeshko)'
class Error(Exception):
"""Thrown If validator constraint is not satisfied."""
class Validator(object):
"""Base class for flags validators.
Users should NOT overload these classes, and use gflags.Register...
methods instead.
"""
# Used to assign each validator an unique insertion_index
validators_count = 0
def __init__(self, checker, message):
"""Constructor to create all validators.
Args:
checker: function to verify the constraint.
Input of this method varies, see SimpleValidator and
DictionaryValidator for a detailed description.
message: string, error message to be shown to the user
"""
self.checker = checker
self.message = message
Validator.validators_count += 1
# Used to assert validators in the order they were registered (CL/18694236)
self.insertion_index = Validator.validators_count
def Verify(self, flag_values):
"""Verify that constraint is satisfied.
flags library calls this method to verify Validator's constraint.
Args:
flag_values: gflags.FlagValues, containing all flags
Raises:
Error: if constraint is not satisfied.
"""
param = self._GetInputToCheckerFunction(flag_values)
if not self.checker(param):
raise Error(self.message)
def GetFlagsNames(self):
"""Return the names of the flags checked by this validator.
Returns:
[string], names of the flags
"""
raise NotImplementedError('This method should be overloaded')
def PrintFlagsWithValues(self, flag_values):
raise NotImplementedError('This method should be overloaded')
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: gflags.FlagValues, containing all flags.
Returns:
Return type depends on the specific validator.
"""
raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
"""Validator behind RegisterValidator() method.
Validates that a single flag passes its checker function. The checker function
takes the flag value and returns True (if value looks fine) or, if flag value
is not valid, either returns False or raises an Exception."""
def __init__(self, flag_name, checker, message):
"""Constructor.
Args:
flag_name: string, name of the flag.
checker: function to verify the validator.
input - value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(SimpleValidator, self).__init__(checker, message)
self.flag_name = flag_name
def GetFlagsNames(self):
return [self.flag_name]
def PrintFlagsWithValues(self, flag_values):
return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: gflags.FlagValues
Returns:
value of the corresponding flag.
"""
return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
"""Validator behind RegisterDictionaryValidator method.
Validates that flag values pass their common checker function. The checker
function takes flag values and returns True (if values look fine) or,
if values are not valid, either returns False or raises an Exception.
"""
def __init__(self, flag_names, checker, message):
"""Constructor.
Args:
flag_names: [string], containing names of the flags used by checker.
checker: function to verify the validator.
input - dictionary, with keys() being flag_names, and value for each
key being the value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(DictionaryValidator, self).__init__(checker, message)
self.flag_names = flag_names
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: gflags.FlagValues
Returns:
      dictionary, with keys() being self.flag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
"""
return dict([key, flag_values[key].value] for key in self.flag_names)
def PrintFlagsWithValues(self, flag_values):
prefix = 'flags '
flags_with_values = []
for key in self.flag_names:
flags_with_values.append('%s=%s' % (key, flag_values[key].value))
return prefix + ', '.join(flags_with_values)
def GetFlagsNames(self):
return self.flag_names
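# --- Illustrative sketch (editor's addition, not part of gflags) ---
# gflags.RegisterValidator normally constructs these objects; building a
# SimpleValidator directly just makes the checker/Verify contract visible.
# The flag name 'port' below is purely hypothetical.
def _ExamplePortValidator():
  return SimpleValidator(
      'port',
      lambda value: value is not None and 0 < value < 65536,
      'port must be in the range (0, 65536)')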
| apache-2.0 |
3dfxsoftware/cbss-addons | TODO-7.0/rent/wizard/__init__.py | 4 | 1149 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import rent_make_group
import rent_check_invoicing
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 |
mistercrunch/panoramix | superset/sql_parse.py | 2 | 12236 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from dataclasses import dataclass # pylint: disable=wrong-import-order
from enum import Enum
from typing import List, Optional, Set
from urllib import parse
import sqlparse
from sqlparse.sql import (
Identifier,
IdentifierList,
Parenthesis,
remove_quotes,
Token,
TokenList,
)
from sqlparse.tokens import Keyword, Name, Punctuation, String, Whitespace
from sqlparse.utils import imt
RESULT_OPERATIONS = {"UNION", "INTERSECT", "EXCEPT", "SELECT"}
ON_KEYWORD = "ON"
PRECEDES_TABLE_NAME = {"FROM", "JOIN", "DESCRIBE", "WITH", "LEFT JOIN", "RIGHT JOIN"}
CTE_PREFIX = "CTE__"
logger = logging.getLogger(__name__)
class CtasMethod(str, Enum):
TABLE = "TABLE"
VIEW = "VIEW"
def _extract_limit_from_query(statement: TokenList) -> Optional[int]:
"""
Extract limit clause from SQL statement.
:param statement: SQL statement
:return: Limit extracted from query, None if no limit present in statement
"""
idx, _ = statement.token_next_by(m=(Keyword, "LIMIT"))
if idx is not None:
_, token = statement.token_next(idx=idx)
if token:
if isinstance(token, IdentifierList):
# In case of "LIMIT <offset>, <limit>", find comma and extract
# first succeeding non-whitespace token
idx, _ = token.token_next_by(m=(sqlparse.tokens.Punctuation, ","))
_, token = token.token_next(idx=idx)
if token and token.ttype == sqlparse.tokens.Literal.Number.Integer:
return int(token.value)
return None
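# Illustrative note (editor's addition): for "... LIMIT 10" the helper returns
# 10, and for the two-argument form "... LIMIT 5, 10" it skips the offset and
# returns 10 as well.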
def strip_comments_from_sql(statement: str) -> str:
"""
Strips comments from a SQL statement, does a simple test first
to avoid always instantiating the expensive ParsedQuery constructor
This is useful for engines that don't support comments
:param statement: A string with the SQL statement
:return: SQL statement without comments
"""
return ParsedQuery(statement).strip_comments() if "--" in statement else statement
@dataclass(eq=True, frozen=True)
class Table: # pylint: disable=too-few-public-methods
"""
A fully qualified SQL table conforming to [[catalog.]schema.]table.
"""
table: str
schema: Optional[str] = None
catalog: Optional[str] = None
def __str__(self) -> str:
"""
Return the fully qualified SQL table name.
"""
return ".".join(
parse.quote(part, safe="").replace(".", "%2E")
for part in [self.catalog, self.schema, self.table]
if part
)
class ParsedQuery:
def __init__(self, sql_statement: str, strip_comments: bool = False):
if strip_comments:
sql_statement = sqlparse.format(sql_statement, strip_comments=True)
self.sql: str = sql_statement
self._tables: Set[Table] = set()
self._alias_names: Set[str] = set()
self._limit: Optional[int] = None
logger.debug("Parsing with sqlparse statement: %s", self.sql)
self._parsed = sqlparse.parse(self.stripped())
for statement in self._parsed:
self._limit = _extract_limit_from_query(statement)
@property
def tables(self) -> Set[Table]:
if not self._tables:
for statement in self._parsed:
self._extract_from_token(statement)
self._tables = {
table for table in self._tables if str(table) not in self._alias_names
}
return self._tables
@property
def limit(self) -> Optional[int]:
return self._limit
def is_select(self) -> bool:
return self._parsed[0].get_type() == "SELECT"
def is_valid_ctas(self) -> bool:
return self._parsed[-1].get_type() == "SELECT"
def is_valid_cvas(self) -> bool:
return len(self._parsed) == 1 and self._parsed[0].get_type() == "SELECT"
def is_explain(self) -> bool:
# Remove comments
statements_without_comments = sqlparse.format(
self.stripped(), strip_comments=True
)
# Explain statements will only be the first statement
return statements_without_comments.startswith("EXPLAIN")
def is_show(self) -> bool:
# Remove comments
statements_without_comments = sqlparse.format(
self.stripped(), strip_comments=True
)
# Show statements will only be the first statement
return statements_without_comments.upper().startswith("SHOW")
def is_set(self) -> bool:
# Remove comments
statements_without_comments = sqlparse.format(
self.stripped(), strip_comments=True
)
# Set statements will only be the first statement
return statements_without_comments.upper().startswith("SET")
def is_unknown(self) -> bool:
return self._parsed[0].get_type() == "UNKNOWN"
def stripped(self) -> str:
return self.sql.strip(" \t\n;")
def strip_comments(self) -> str:
return sqlparse.format(self.stripped(), strip_comments=True)
def get_statements(self) -> List[str]:
"""Returns a list of SQL statements as strings, stripped"""
statements = []
for statement in self._parsed:
if statement:
sql = str(statement).strip(" \n;\t")
if sql:
statements.append(sql)
return statements
@staticmethod
def _get_table(tlist: TokenList) -> Optional[Table]:
"""
Return the table if valid, i.e., conforms to the [[catalog.]schema.]table
construct.
:param tlist: The SQL tokens
:returns: The table if the name conforms
"""
# Strip the alias if present.
idx = len(tlist.tokens)
if tlist.has_alias():
ws_idx, _ = tlist.token_next_by(t=Whitespace)
if ws_idx != -1:
idx = ws_idx
tokens = tlist.tokens[:idx]
if (
len(tokens) in (1, 3, 5)
and all(imt(token, t=[Name, String]) for token in tokens[::2])
and all(imt(token, m=(Punctuation, ".")) for token in tokens[1::2])
):
return Table(*[remove_quotes(token.value) for token in tokens[::-2]])
return None
@staticmethod
def _is_identifier(token: Token) -> bool:
return isinstance(token, (IdentifierList, Identifier))
def _process_tokenlist(self, token_list: TokenList) -> None:
"""
Add table names to table set
:param token_list: TokenList to be processed
"""
# exclude subselects
if "(" not in str(token_list):
table = self._get_table(token_list)
if table and not table.table.startswith(CTE_PREFIX):
self._tables.add(table)
return
# store aliases
if token_list.has_alias():
self._alias_names.add(token_list.get_alias())
# some aliases are not parsed properly
if token_list.tokens[0].ttype == Name:
self._alias_names.add(token_list.tokens[0].value)
self._extract_from_token(token_list)
def as_create_table(
self,
table_name: str,
schema_name: Optional[str] = None,
overwrite: bool = False,
method: CtasMethod = CtasMethod.TABLE,
) -> str:
"""Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param table_name: table that will contain the results of the query execution
:param schema_name: schema name for the target table
:param overwrite: table_name will be dropped if true
:param method: method for the CTA query, currently view or table creation
:return: Create table as query
"""
exec_sql = ""
sql = self.stripped()
# TODO(bkyryliuk): quote full_table_name
full_table_name = f"{schema_name}.{table_name}" if schema_name else table_name
if overwrite:
exec_sql = f"DROP {method} IF EXISTS {full_table_name};\n"
exec_sql += f"CREATE {method} {full_table_name} AS \n{sql}"
return exec_sql
def _extract_from_token( # pylint: disable=too-many-branches
self, token: Token
) -> None:
"""
<Identifier> store a list of subtokens and <IdentifierList> store lists of
subtoken list.
It extracts <IdentifierList> and <Identifier> from :param token: and loops
through all subtokens recursively. It finds table_name_preceding_token and
passes <IdentifierList> and <Identifier> to self._process_tokenlist to populate
self._tables.
:param token: instance of Token or child class, e.g. TokenList, to be processed
"""
if not hasattr(token, "tokens"):
return
table_name_preceding_token = False
for item in token.tokens:
if item.is_group and (
not self._is_identifier(item) or isinstance(item.tokens[0], Parenthesis)
):
self._extract_from_token(item)
if item.ttype in Keyword and (
item.normalized in PRECEDES_TABLE_NAME
or item.normalized.endswith(" JOIN")
):
table_name_preceding_token = True
continue
if item.ttype in Keyword:
table_name_preceding_token = False
continue
if table_name_preceding_token:
if isinstance(item, Identifier):
self._process_tokenlist(item)
elif isinstance(item, IdentifierList):
for token2 in item.get_identifiers():
if isinstance(token2, TokenList):
self._process_tokenlist(token2)
elif isinstance(item, IdentifierList):
if any(not self._is_identifier(token2) for token2 in item.tokens):
self._extract_from_token(item)
def set_or_update_query_limit(self, new_limit: int, force: bool = False) -> str:
"""Returns the query with the specified limit.
Does not change the underlying query if user did not apply the limit,
otherwise replaces the limit with the lower value between existing limit
in the query and new_limit.
:param new_limit: Limit to be incorporated into returned query
:return: The original query with new limit
"""
if not self._limit:
return f"{self.stripped()}\nLIMIT {new_limit}"
limit_pos = None
statement = self._parsed[0]
# Add all items to before_str until there is a limit
for pos, item in enumerate(statement.tokens):
if item.ttype in Keyword and item.value.lower() == "limit":
limit_pos = pos
break
_, limit = statement.token_next(idx=limit_pos)
# Override the limit only when it exceeds the configured value.
if limit.ttype == sqlparse.tokens.Literal.Number.Integer and (
force or new_limit < int(limit.value)
):
limit.value = new_limit
elif limit.is_group:
limit.value = f"{next(limit.get_identifiers())}, {new_limit}"
str_res = ""
for i in statement.tokens:
str_res += str(i.value)
return str_res
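# --- Illustrative usage sketch (editor's addition, not part of Superset) ---
# ParsedQuery exposes the pieces a query runner needs: the referenced tables,
# any existing LIMIT, and helpers to rewrite the statement. The table and
# schema names below are purely hypothetical.
if __name__ == "__main__":
    query = ParsedQuery("SELECT name FROM main.users LIMIT 100")
    assert query.is_select()
    assert query.limit == 100
    assert Table("users", "main") in query.tables
    # Lower the existing LIMIT to 10 and print the rewritten statement.
    print(query.set_or_update_query_limit(10))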
| apache-2.0 |
eceglov/phantomjs | src/qt/qtwebkit/Source/JavaScriptCore/disassembler/udis86/ud_opcode.py | 118 | 9310 | # udis86 - scripts/ud_opcode.py
#
# Copyright (c) 2009 Vivek Thampi
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class UdOpcodeTables:
TableInfo = {
'opctbl' : { 'name' : 'UD_TAB__OPC_TABLE', 'size' : 256 },
'/sse' : { 'name' : 'UD_TAB__OPC_SSE', 'size' : 4 },
'/reg' : { 'name' : 'UD_TAB__OPC_REG', 'size' : 8 },
'/rm' : { 'name' : 'UD_TAB__OPC_RM', 'size' : 8 },
'/mod' : { 'name' : 'UD_TAB__OPC_MOD', 'size' : 2 },
'/m' : { 'name' : 'UD_TAB__OPC_MODE', 'size' : 3 },
'/x87' : { 'name' : 'UD_TAB__OPC_X87', 'size' : 64 },
'/a' : { 'name' : 'UD_TAB__OPC_ASIZE', 'size' : 3 },
'/o' : { 'name' : 'UD_TAB__OPC_OSIZE', 'size' : 3 },
'/3dnow' : { 'name' : 'UD_TAB__OPC_3DNOW', 'size' : 256 },
'vendor' : { 'name' : 'UD_TAB__OPC_VENDOR', 'size' : 3 },
}
OpcodeTable0 = {
'type' : 'opctbl',
'entries' : {},
'meta' : 'table0'
}
OpcExtIndex = {
# ssef2, ssef3, sse66
'sse': {
'none' : '00',
'f2' : '01',
'f3' : '02',
'66' : '03'
},
# /mod=
'mod': {
'!11' : '00',
'11' : '01'
},
# /m=, /o=, /a=
'mode': {
'16' : '00',
'32' : '01',
'64' : '02'
},
'vendor' : {
'amd' : '00',
'intel' : '01',
'any' : '02'
}
}
InsnTable = []
MnemonicsTable = []
ThreeDNowTable = {}
def sizeOfTable( self, t ):
return self.TableInfo[ t ][ 'size' ]
def nameOfTable( self, t ):
return self.TableInfo[ t ][ 'name' ]
#
# Updates a table entry: If the entry doesn't exist
# it will create the entry, otherwise, it will walk
# while validating the path.
#
def updateTable( self, table, index, type, meta ):
if not index in table[ 'entries' ]:
table[ 'entries' ][ index ] = { 'type' : type, 'entries' : {}, 'meta' : meta }
if table[ 'entries' ][ index ][ 'type' ] != type:
raise NameError( "error: violation in opcode mapping (overwrite) %s with %s." %
( table[ 'entries' ][ index ][ 'type' ], type) )
return table[ 'entries' ][ index ]
class Insn:
"""An abstract type representing an instruction in the opcode map.
"""
# A mapping of opcode extensions to their representational
# values used in the opcode map.
OpcExtMap = {
'/rm' : lambda v: "%02x" % int(v, 16),
'/x87' : lambda v: "%02x" % int(v, 16),
'/3dnow' : lambda v: "%02x" % int(v, 16),
'/reg' : lambda v: "%02x" % int(v, 16),
# modrm.mod
# (!11, 11) => (00, 01)
'/mod' : lambda v: '00' if v == '!11' else '01',
# Mode extensions:
# (16, 32, 64) => (00, 01, 02)
'/o' : lambda v: "%02x" % (int(v) / 32),
'/a' : lambda v: "%02x" % (int(v) / 32),
'/m' : lambda v: "%02x" % (int(v) / 32),
'/sse' : lambda v: UdOpcodeTables.OpcExtIndex['sse'][v]
}
def __init__(self, prefixes, mnemonic, opcodes, operands, vendor):
self.opcodes = opcodes
self.prefixes = prefixes
self.mnemonic = mnemonic
self.operands = operands
self.vendor = vendor
self.opcext = {}
ssePrefix = None
if self.opcodes[0] in ('ssef2', 'ssef3', 'sse66'):
ssePrefix = self.opcodes[0][3:]
self.opcodes.pop(0)
# do some preliminary decoding of the instruction type
# 1byte, 2byte or 3byte instruction?
self.nByteInsn = 1
if self.opcodes[0] == '0f': # 2byte
# 2+ byte opcodes are always disambiguated by an
# sse prefix, unless it is a 3d now instruction
# which is 0f 0f ...
if self.opcodes[1] != '0f' and ssePrefix is None:
ssePrefix = 'none'
if self.opcodes[1] in ('38', '3a'): # 3byte
self.nByteInsn = 3
else:
self.nByteInsn = 2
# The opcode that indexes into the opcode table.
self.opcode = self.opcodes[self.nByteInsn - 1]
# Record opcode extensions
for opcode in self.opcodes[self.nByteInsn:]:
arg, val = opcode.split('=')
self.opcext[arg] = self.OpcExtMap[arg](val)
# Record sse extension: the reason sse extension is handled
# separately is that historically sse was handled as a first
# class opcode, not as an extension. Now that sse is handled
# as an extension, we do the manual conversion here, as opposed
# to modifying the opcode xml file.
if ssePrefix is not None:
self.opcext['/sse'] = self.OpcExtMap['/sse'](ssePrefix)
def parse(self, table, insn):
index = insn.opcodes[0];
if insn.nByteInsn > 1:
assert index == '0f'
table = self.updateTable(table, index, 'opctbl', '0f')
index = insn.opcodes[1]
if insn.nByteInsn == 3:
table = self.updateTable(table, index, 'opctbl', index)
index = insn.opcodes[2]
# Walk down the tree, create levels as needed, for opcode
# extensions. The order is important, and determines how
# well the opcode table is packed. Also note, /sse must be
# before /o, because /sse may consume operand size prefix
        # and thus affect the outcome of /o.
for ext in ('/mod', '/x87', '/reg', '/rm', '/sse',
'/o', '/a', '/m', '/3dnow'):
if ext in insn.opcext:
table = self.updateTable(table, index, ext, ext)
index = insn.opcext[ext]
# additional table for disambiguating vendor
if len(insn.vendor):
table = self.updateTable(table, index, 'vendor', insn.vendor)
index = self.OpcExtIndex['vendor'][insn.vendor]
# make leaf node entries
leaf = self.updateTable(table, index, 'insn', '')
leaf['mnemonic'] = insn.mnemonic
leaf['prefixes'] = insn.prefixes
leaf['operands'] = insn.operands
# add instruction to linear table of instruction forms
self.InsnTable.append({ 'prefixes' : insn.prefixes,
'mnemonic' : insn.mnemonic,
'operands' : insn.operands })
# add mnemonic to mnemonic table
if not insn.mnemonic in self.MnemonicsTable:
self.MnemonicsTable.append(insn.mnemonic)
# Adds an instruction definition to the opcode tables
def addInsnDef( self, prefixes, mnemonic, opcodes, operands, vendor ):
insn = self.Insn(prefixes=prefixes,
mnemonic=mnemonic,
opcodes=opcodes,
operands=operands,
vendor=vendor)
self.parse(self.OpcodeTable0, insn)
def print_table( self, table, pfxs ):
print("%s |" % pfxs)
keys = table[ 'entries' ].keys()
if ( len( keys ) ):
keys.sort()
for idx in keys:
e = table[ 'entries' ][ idx ]
if e[ 'type' ] == 'insn':
print("%s |-<%s>" % ( pfxs, idx )),
print("%s %s" % ( e[ 'mnemonic' ], ' '.join( e[ 'operands'] )))
else:
print("%s |-<%s> %s" % ( pfxs, idx, e['type'] ))
self.print_table( e, pfxs + ' |' )
def print_tree( self ):
self.print_table( self.OpcodeTable0, '' )
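# --- Illustrative sketch (editor's addition, not part of udis86) ---
# Each instruction definition walks the table tree one opcode byte (and then
# one opcode extension) at a time; the leaf node stores mnemonic and operands.
# The opcode byte and operand used below are purely illustrative.
if __name__ == '__main__':
    tables = UdOpcodeTables()
    tables.addInsnDef( prefixes=[], mnemonic='inc',
                       opcodes=['40'], operands=['eAX'], vendor='' )
    tables.print_tree()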
| bsd-3-clause |
lqdc/pyew | vstruct/defs/rar.py | 18 | 1851 | import vstruct
from vstruct.primitives import *
HEAD_TYPE_MARKER = 0x72 #marker block
HEAD_TYPE_ARCHIVE = 0x73 #archive header
HEAD_TYPE_FILE_HDR = 0x74 #file header
HEAD_TYPE_OLD_COMMENT = 0x75 #old style comment header
HEAD_TYPE_OLD_AUTH = 0x76 #old style authenticity information
HEAD_TYPE_OLD_SUBBLOCK = 0x77 #old style subblock
HEAD_TYPE_OLD_RECOVERY = 0x78 #old style recovery record
HEAD_TYPE_OLD_AUTH2 = 0x79 #old style authenticity information
HEAD_TYPE_SUBBLOCK = 0x7a #subblock
class RarChunkUnkn(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CHUNK_BYTES = v_bytes()
class RarBlock(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.HEAD_CRC = v_uint16()
self.HEAD_TYPE = v_uint8()
self.HEAD_FLAGS = v_uint16()
self.HEAD_SIZE = v_uint16()
self.ADD_SIZE = v_uint32()
self.BLOCK_DATA = vstruct.VStruct()
def pcb_HEAD_FLAGS(self):
        # parser callback: bit 0x8000 of HEAD_FLAGS signals that a 4-byte
        # ADD_SIZE field follows the fixed-size header
if self.HEAD_FLAGS & 0x8000:
self.ADD_SIZE = v_uint32()
else:
self.ADD_SIZE = vstruct.VStruct()
def pcb_ADD_SIZE(self):
hsize = 7
totsize = self.HEAD_SIZE
if not isinstance(self.ADD_SIZE, vstruct.VStruct):
hsize += 4
totsize += self.ADD_SIZE
# We will *now* use TYPE to find out our chunk guts
self.BLOCK_DATA = v_bytes(totsize - hsize)
if __name__ == '__main__':
import sys
offset = 0
b = file(sys.argv[1], 'rb').read()
while offset < len(b):
r = RarBlock()
offset = r.vsParse( b, offset=offset)
print r.tree()
| gpl-2.0 |
pk400/catering | myvenv/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| mit |
Philippe12/external_chromium_org | build/util/lib/common/unittest_util.py | 138 | 4879 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utilities for dealing with the python unittest module."""
import fnmatch
import sys
import unittest
class _TextTestResult(unittest._TextTestResult):
"""A test result class that can print formatted text results to a stream.
Results printed in conformance with gtest output format, like:
[ RUN ] autofill.AutofillTest.testAutofillInvalid: "test desc."
[ OK ] autofill.AutofillTest.testAutofillInvalid
[ RUN ] autofill.AutofillTest.testFillProfile: "test desc."
[ OK ] autofill.AutofillTest.testFillProfile
[ RUN ] autofill.AutofillTest.testFillProfileCrazyCharacters: "Test."
[ OK ] autofill.AutofillTest.testFillProfileCrazyCharacters
"""
def __init__(self, stream, descriptions, verbosity):
unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
self._fails = set()
def _GetTestURI(self, test):
return '%s.%s.%s' % (test.__class__.__module__,
test.__class__.__name__,
test._testMethodName)
def getDescription(self, test):
return '%s: "%s"' % (self._GetTestURI(test), test.shortDescription())
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.stream.writeln('[ RUN ] %s' % self.getDescription(test))
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
self.stream.writeln('[ OK ] %s' % self._GetTestURI(test))
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self.stream.writeln('[ ERROR ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self.stream.writeln('[ FAILED ] %s' % self._GetTestURI(test))
self._fails.add(self._GetTestURI(test))
def getRetestFilter(self):
return ':'.join(self._fails)
class TextTestRunner(unittest.TextTestRunner):
"""Test Runner for displaying test results in textual format.
Results are displayed in conformance with google test output.
"""
def __init__(self, verbosity=1):
unittest.TextTestRunner.__init__(self, stream=sys.stderr,
verbosity=verbosity)
def _makeResult(self):
return _TextTestResult(self.stream, self.descriptions, self.verbosity)
def GetTestsFromSuite(suite):
"""Returns all the tests from a given test suite."""
tests = []
for x in suite:
if isinstance(x, unittest.TestSuite):
tests += GetTestsFromSuite(x)
else:
tests += [x]
return tests
def GetTestNamesFromSuite(suite):
"""Returns a list of every test name in the given suite."""
return map(lambda x: GetTestName(x), GetTestsFromSuite(suite))
def GetTestName(test):
"""Gets the test name of the given unittest test."""
return '.'.join([test.__class__.__module__,
test.__class__.__name__,
test._testMethodName])
def FilterTestSuite(suite, gtest_filter):
"""Returns a new filtered tests suite based on the given gtest filter.
See http://code.google.com/p/googletest/wiki/AdvancedGuide
for gtest_filter specification.
"""
return unittest.TestSuite(FilterTests(GetTestsFromSuite(suite), gtest_filter))
def FilterTests(all_tests, gtest_filter):
"""Filter a list of tests based on the given gtest filter.
Args:
all_tests: List of tests (unittest.TestSuite)
gtest_filter: Filter to apply.
Returns:
Filtered subset of the given list of tests.
"""
test_names = [GetTestName(test) for test in all_tests]
filtered_names = FilterTestNames(test_names, gtest_filter)
return [test for test in all_tests if GetTestName(test) in filtered_names]
def FilterTestNames(all_tests, gtest_filter):
"""Filter a list of test names based on the given gtest filter.
See http://code.google.com/p/googletest/wiki/AdvancedGuide
for gtest_filter specification.
Args:
all_tests: List of test names.
gtest_filter: Filter to apply.
Returns:
Filtered subset of the given list of test names.
"""
pattern_groups = gtest_filter.split('-')
positive_patterns = pattern_groups[0].split(':')
negative_patterns = None
if len(pattern_groups) > 1:
negative_patterns = pattern_groups[1].split(':')
tests = []
for test in all_tests:
    # Test name must be matched by at least one positive pattern.
for pattern in positive_patterns:
if fnmatch.fnmatch(test, pattern):
break
else:
continue
# Test name must not be matched by any negative patterns.
for pattern in negative_patterns or []:
if fnmatch.fnmatch(test, pattern):
break
else:
tests += [test]
return tests
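# Illustrative sketch (added for clarity; not part of the original module):
# how the gtest-style filter behaves on plain test-name strings.  The test
# names below are invented for the example.
if __name__ == '__main__':
  _names = ['suite.Case.testFoo', 'suite.Case.testBar', 'other.Case.testBaz']
  # Keep everything under suite.* except tests whose names end in Bar.
  print(FilterTestNames(_names, 'suite.*-*Bar'))  # -> ['suite.Case.testFoo']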
| bsd-3-clause |
chatcannon/numpy | tools/test-installed-numpy.py | 106 | 2385 | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
# A simple script to test the installed version of numpy by calling
# 'numpy.test()'. Key features:
# -- convenient command-line syntax
# -- sets exit status appropriately, useful for automated test environments
# It would be better to set this up as a module in the numpy namespace, so
# that it could be run as:
# python -m numpy.run_tests <args>
# But, python2.4's -m switch only works with top-level modules, not modules
# that are inside packages. So, once we drop 2.4 support, maybe...
import sys, os
# In case we are run from the source directory, we don't want to import numpy
# from there, we want to import the installed version:
sys.path.pop(0)
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] -- [nosetests options]")
parser.add_option("-v", "--verbose",
action="count", dest="verbose", default=1,
help="increase verbosity")
parser.add_option("--doctests",
action="store_true", dest="doctests", default=False,
help="Run doctests in module")
parser.add_option("--coverage",
action="store_true", dest="coverage", default=False,
help="report coverage of NumPy code (requires 'coverage' module")
parser.add_option("-m", "--mode",
action="store", dest="mode", default="fast",
help="'fast', 'full', or something that could be "
"passed to nosetests -A [default: %default]")
(options, args) = parser.parse_args()
import numpy
# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
# The same flags check is also used in the tests to switch behavior.
if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
if not numpy.ones((10, 1), order='C').flags.f_contiguous:
print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
sys.exit(1)
elif numpy.ones((10, 1), order='C').flags.f_contiguous:
print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
sys.exit(1)
result = numpy.test(options.mode,
verbose=options.verbose,
extra_argv=args,
doctests=options.doctests,
coverage=options.coverage)
if result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
| bsd-3-clause |
felixbuenemann/sentry | src/sentry/migrations/0176_auto__add_field_organizationmember_counter__add_unique_organizationmem.py | 34 | 37043 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OrganizationMember.counter'
db.add_column('sentry_organizationmember', 'counter',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(null=True, blank=True),
keep_default=False)
# Adding unique constraint on 'OrganizationMember', fields ['organization', 'counter']
db.create_unique('sentry_organizationmember', ['organization_id', 'counter'])
def backwards(self, orm):
# Removing unique constraint on 'OrganizationMember', fields ['organization', 'counter']
db.delete_unique('sentry_organizationmember', ['organization_id', 'counter'])
# Deleting field 'OrganizationMember.counter'
db.delete_column('sentry_organizationmember', 'counter')
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'), ('organization', 'counter'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry'] | bsd-3-clause |
MarishaYasko/interactive-stories-stands | InteractiveStands/Lib/encodings/iso8859_4.py | 272 | 13376 | """ Python Character Mapping Codec iso8859_4 generated from 'MAPPINGS/ISO8859/8859-4.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-4',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0138' # 0xA2 -> LATIN SMALL LETTER KRA
'\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA
'\xa4' # 0xA4 -> CURRENCY SIGN
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
'\u0112' # 0xAA -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE
'\xad' # 0xAD -> SOFT HYPHEN
'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u02db' # 0xB2 -> OGONEK
'\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA
'\xb4' # 0xB4 -> ACUTE ACCENT
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA
'\u02c7' # 0xB7 -> CARON
'\xb8' # 0xB8 -> CEDILLA
'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
'\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA
'\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE
'\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG
'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON
'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE
'\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON
'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE
'\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
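# Illustrative sketch (added for clarity; not part of the generated codec):
# exercising the two tables directly.  Per the decoding table above, 0xC0
# decodes to U+0100 and 0xE0 to U+0101; charmap_encode inverts the mapping.
if __name__ == "__main__":
    _text, _length = codecs.charmap_decode(b'\xc0\xe0', 'strict', decoding_table)
    print(_text)   # 'Āā'
    _raw, _length = codecs.charmap_encode(_text, 'strict', encoding_table)
    print(_raw)    # b'\xc0\xe0'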
| mit |
wSedlacek/kernel_moto_shamu | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
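# Illustrative sketch (added for clarity; not part of the original script):
# the minimal interface RootFrame expects from its sched_tracer argument,
# inferred from the calls above (set_root_win, interval, nr_rectangles,
# fill_zone, mouse_down).  A real tracer (e.g. perf's sched-migration.py)
# supplies actual data; this stub only documents the contract.
class StubSchedTracer(object):
	def set_root_win(self, win):
		self.win = win
	def interval(self):
		return (0, 10 ** 6)	# (start, end) timestamps of the trace
	def nr_rectangles(self):
		return 4		# number of horizontal lanes to draw
	def fill_zone(self, start, end):
		pass			# called from on_paint() to draw events between start and end
	def mouse_down(self, rect, t):
		pass			# called when lane number rect is clicked at time t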
| gpl-2.0 |
Geoion/AutobahnPython | examples/twisted/wamp/test_newapi3.py | 8 | 1593 | from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks as coroutine
from autobahn.twisted.connection import Connection
def main(reactor, connection):
@coroutine
def on_join(session, details):
print("on_join: {}".format(details))
try:
print(session._transport)
print(session._transport.websocket_protocol_in_use)
except Exception as e:
pass
def add2(a, b):
print("add2() called", a, b)
return a + b
yield session.register(add2, u'com.example.add2')
try:
res = yield session.call(u'com.example.add2', 2, 3)
print("result: {}".format(res))
except Exception as e:
print("error: {}".format(e))
finally:
print("leaving ..")
session.leave()
connection.on('join', on_join)
if __name__ == '__main__':
#import txaio
#txaio.use_twisted()
#txaio.start_logging(level='debug')
transports = [
{
'type': 'rawsocket',
'serializer': 'msgpack',
'endpoint': {
'type': 'unix',
'path': '/tmp/cb1.sock'
}
},
{
'type': 'websocket',
'url': 'ws://127.0.0.1:8080/ws',
'endpoint': {
'type': 'tcp',
'host': '127.0.0.1',
'port': 8080
}
}
]
connection = Connection(transports=transports)
connection.on('start', main)
react(connection.start)
| mit |
mjmottram/echidna | echidna/calc/decay.py | 1 | 24391 | """ Double beta decay utility converter
Provides a useful tool for converting between different double beta
decay parameters.
"""
import numpy
from echidna.calc import constants as const
import echidna.test.physics_tests as physics_tests
class DBIsotope(object):
""" Class which calculates expected counts for a DBD isotope
over a given experiment livetime.
Args:
name (string): Name of the isotope.
atm_weight_iso (float): Atomic weight of isotope in g/mol.
atm_weight_nat (float): Atomic weight of natural element in g/mol.
abundance (float): Natural abundance of isotope with 0 to 1
equivalent to 0% to 100%.
phase_space (float): Phase space of the isotope.
matrix_element (float): Matrix element of the isotope.
loading (float, optional): Loading of isotope with 0 to 1
equivalent to 0% to 100%. Default is stored in
:class:`echidna.calc.constants`
fv_radius (float, optional): Radius of fiducial volume in mm.
Default is stored in :class:`echidna.calc.constants`
scint_density (float, optional): Density of liquid scintillator in
kg/mm^3. Default is stored in :class:`echidna.calc.constants`
outer_radius (float, optional): Radius of outer container
containing fiducial volume, e.g. AV, in mm. Default is stored in
:class:`echidna.calc.constants`
roi_efficiency (float, optional): Efficiency factor of ROI.
Calculated by dividing the integral of the spectrum, shrunk to
the ROI, by the integral of the full spectrum. Default is
0.62465 (-0.5 to 1.5 sigma integral of a standard gaussian)
Attributes:
_name (string): Name of the isotope.
_atm_weight_iso (float): Atomic weight of isotope in g/mol.
_atm_weight_nat (float): Atomic weight of natural element in g/mol.
_abundance (float): Natural abundance of isotope with 0 to 1
equivalent to 0% to 100%.
_phase_space (float): Phase space of the isotope.
_matrix_element (float): Matrix element of the isotope.
_loading (float): Loading of isotope with 0 to 1 equivalent to 0%
to 100%. Default is stored in :class:`echidna.calc.constants`
_fv_radius (float): Radius of fiducial volume in mm. Default is
stored in :class:`echidna.calc.constants`
_scint_density (float): Density of liquid scintillator in
kg/mm^3. Default is stored in :class:`echidna.calc.constants`
_outer_radius (float): Radius of outer container containing
fiducial volume, e.g. AV, in mm. Default is stored in
:class:`echidna.calc.constants`
_roi_efficiency (float): Efficiency factor of ROI. Calculated by
dividing the integral of the spectrum, shrunk to the ROI, by
the integral of the full spectrum. Default is 0.62465 (-0.5 to
1.5 sigma integral of a standard gaussian)
Raises:
ValueError: If abundance is < 0. or > 1.
ValueError: If :obj:`outer_radius` is negative or zero.
ValueError: If :obj:`fv_radius` is not between zero and
:obj:`outer_radius`.
"""
def __init__(self, name, atm_weight_iso, atm_weight_nat, abundance,
phase_space, matrix_element, loading=None, fv_radius=None,
outer_radius=None, scint_density=None,
roi_efficiency=0.62465):
if abundance < 0. or abundance > 1.:
raise ValueError("Abundance ranges from 0 to 1")
self._name = name
self._atm_weight_iso = atm_weight_iso
self._atm_weight_nat = atm_weight_nat
self._abundance = abundance
self._phase_space = phase_space
self._matrix_element = matrix_element
if loading:
if loading < 0. or loading > 1.:
raise ValueError("Loading ranges from 0 to 1")
self._loading = loading
else:
# Default SNO+ Loading
self._loading = const._loading
if outer_radius:
if outer_radius <= 0.:
raise ValueError("Outer radius must be positive and non-zero")
self._outer_radius = outer_radius
else:
self._outer_radius = const._av_radius
if fv_radius:
if fv_radius <= 0. or fv_radius > self._outer_radius:
raise ValueError("FV radius must be between zero and outer "
"radius")
self._fv_radius = fv_radius
else:
self._fv_radius = const._fv_radius
if scint_density:
self._scint_density = scint_density
else:
self._scint_density = const._scint_density
# Defaults to standard Gaussian efficiency for
# -1/2 sigma to +3/2 sigma ROI
self._roi_efficiency = roi_efficiency
if roi_efficiency != 0.62465:
print ("Warning: using calculated ROI efficiency %.4f "
"not default (0.62465)" % roi_efficiency)
def get_n_atoms(self, fv_radius=None, loading=None, scint_density=None,
target_mass=None, scint_mass=None, outer_radius=None):
""" Calculates the number of atoms of the double-beta isotope.
Set up to follow the full (SNO+-specific) calculation as per
SNO+-doc-1728v2 but can look at other scenarios/detectors by
overriding the default args.
.. warning:: All args default to SNO+ specific values!
Args:
fv_radius (float, optional): Radius of fiducial volume in mm.
Default is stored as a class variable.
loading (float, optional): Loading of isotope with 0 to 1
equivalent to 0% to 100%. Default is stored as a class
variable.
scint_density (float, optional): Density of liquid scintillator in
kg/mm^3. Default is stored as a class variable.
target_mass (float, optional): Target mass in kg. Calculates a
value by default.
scint_mass (float, optional): Mass of scintillator in kg.
Calculates a value by default.
outer_radius (float, optional): Radius of outer container
containing fiducial volume, e.g. AV, in mm. Default is stored
as a class variable.
Raises:
ValueError: If :obj:`loading` is not between zero and 1.
ValueError: If :obj:`outer_radius` is negative or zero.
ValueError: If :obj:`fv_radius` is not between zero and
:obj:`outer_radius`.
Returns:
float: Number of atoms.
"""
# Set defaults
if outer_radius is None: # use class variable
outer_radius = self._outer_radius
if outer_radius <= 0.:
raise ValueError("Outer radius must be positive and non-zero")
if fv_radius is None: # use class variable
fv_radius = self._fv_radius
if fv_radius <= 0. or fv_radius > outer_radius:
raise ValueError("FV radius must be between zero and outer radius")
if loading is None: # use class variable
loading = self._loading
if loading < 0. or loading > 1.:
raise ValueError("Loading ranges from 0 to 1")
if scint_density is None: # use class variable
scint_density = self._scint_density
if target_mass is None: # Calculate target mass
if scint_mass is None: # Calculate scint_mass
# Mass of scintillator
volume = (4./3.) * numpy.pi * outer_radius**3 # mm^3
scint_mass = scint_density * volume
# Mass fraction
mass_iso = self._atm_weight_iso*const._atomic_mass_unit # kg/atom
mass_nat = self._atm_weight_nat*const._atomic_mass_unit # kg/atom
mass_fraction = self._abundance*mass_iso/mass_nat
# Volume fraction
volume_fraction = fv_radius**3 / outer_radius**3
target_mass = mass_fraction * volume_fraction * loading *\
scint_mass
n_atoms = (target_mass*const._n_avagadro) /\
(self._atm_weight_iso*1.e-3)
return n_atoms
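    # Hedged worked example (rounded numbers, for orientation only): with the
    # SNO+-like defaults of ~0.3% loading, Te-130 abundance 0.3408 and a
    # ~3500 mm FV inside a ~6000 mm AV, mass_fraction ~ 0.3408*129.9/127.6
    # ~ 0.35 and volume_fraction ~ (3500./6000.)**3 ~ 0.2, so a few hundred
    # tonnes of scintillator give a target mass of order 1e2 kg and
    # n_atoms ~ target_mass*const._n_avagadro/129.9e-3, i.e. of order
    # 1e26-1e27 atoms, consistent with the values checked in test() below.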
def half_life_to_activity(self, half_life, n_atoms=None):
""" Calculates the activity for an isotope with a given half-life
and number of atoms.
Args:
half_life (float): Half-life of an isotope in years.
n_atoms (float, optional): Number of atoms of an isotope.
Returns:
float: Activity in decays per year.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
return (numpy.log(2)/half_life)*n_atoms
def activity_to_half_life(self, activity, n_atoms=None):
""" Calculates the half-life of an isotope with a given
activity and number of atoms.
Args:
activity (float): Activity of the isotope in
:math:`years^{-1}`.
n_atoms (float, optional): Number of atoms of an isotope.
Returns:
float: Half-life in years.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
return numpy.log(2)*n_atoms/activity
def eff_mass_to_half_life(self, eff_mass):
""" Converts from effective majorana mass to :math:`0\\nu2\\beta`
half-life.
Args:
eff_mass (float): Effective majorana mass, in eV.
Raises:
ValueError: If effective mass is not positive and non-zero.
Returns:
float: :math:`0\\nu2\\beta` half-life, in years.
"""
if eff_mass <= 0.:
raise ValueError("Effective mass should be positive and non-zero")
sq_mass_ratio = eff_mass**2/const._electron_mass**2
return 1/(self._phase_space*self._matrix_element**2*sq_mass_ratio)
def half_life_to_eff_mass(self, half_life):
""" Converts from :math:`0\\nu2\\beta` half-life to effective
majorana mass.
Args:
half_life (float): :math:`0\\nu2\\beta` half-life, in years.
Returns:
float: Effective majorana mass, in eV.
"""
return numpy.sqrt(const._electron_mass ** 2 /
(self._phase_space * self._matrix_element ** 2 *
half_life))
def activity_to_counts(self, activity, roi_cut=True, livetime=5.):
""" Converts activity to number of counts, assuming constant activity.
Args:
activity (float): Initial activity of the isotope in
:math:`years^{-1}`.
roi_cut (bool, optional): If True (default) calculates counts
in the ROI, not counts in the full spectrum.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Number of counts.
"""
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
if roi_cut:
return activity*livetime*self._roi_efficiency
else:
return activity*livetime
def counts_to_activity(self, counts, roi_cut=True, livetime=5.):
""" Converts counts to activity, assuming constant activity.
Args:
counts (float): Number of counts.
roi_cut (bool, optional): If True (default) assumes counts
in the ROI, not counts in the full spectrum.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Activity of the isotope in :math:`years^{-1}`.
"""
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
if roi_cut:
return counts/(livetime*self._roi_efficiency)
else:
return counts/livetime
def counts_to_eff_mass(self, counts, n_atoms=None,
roi_cut=True, livetime=5.):
""" Converts from signal counts to effective majorana mass.
Args:
counts (float): Number of signal counts within the livetime
specified.
n_atoms (float, optional): Number of isotope atoms/nuclei that could
potentially decay to produce signal.
roi_cut (bool, optional): If True (default) assumes counts
in the ROI, not counts in the full spectrum.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Effective majorana mass in eV.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
half_life = self.counts_to_half_life(counts, n_atoms,
roi_cut, livetime)
return self.half_life_to_eff_mass(half_life)
def eff_mass_to_counts(self, eff_mass, n_atoms=None,
roi_cut=True, livetime=5.):
""" Converts from effective majorana mass to signal counts.
Args:
eff_mass (float): Effective majorana mass in eV.
n_atoms (float, optional): Number of isotope atoms/nuclei that could
potentially decay to produce signal.
roi_cut (bool, optional): If True (default) calculates counts
in the ROI, not counts in the full spectrum.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If effective mass is not positive and non-zero.
            ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Expected number of signal counts within the livetime
specified.
"""
if eff_mass <= 0.:
raise ValueError("Effective mass should be positive and non-zero")
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
half_life = self.eff_mass_to_half_life(eff_mass)
return self.half_life_to_counts(half_life, n_atoms, roi_cut, livetime)
def half_life_to_counts(self, half_life, n_atoms=None,
roi_cut=True, livetime=5.):
""" Converts from isotope's half-life to signal counts.
Args:
half_life (float): Isotope's :math:`0\\nu2\\beta` half-life in
years.
n_atoms (float, optional): Number of isotope atoms/nuclei that could
potentially decay to produce signal.
roi_cut (bool, optional): If True (default) calculates counts
in the ROI, not counts in the full spectrum.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Expected number of counts.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
activity = self.half_life_to_activity(half_life, n_atoms)
return self.activity_to_counts(activity, roi_cut, livetime)
def counts_to_half_life(self, counts, n_atoms=None,
roi_cut=True, livetime=5.):
""" Converts from signal count to isotope's half-life.
Args:
            counts (float): Number of signal counts within the livetime
specified.
n_atoms (float, optional): Number of isotope atoms/nuclei that could
potentially decay to produce signal.
roi_cut (bool, optional): If True (default) assumes counts
in the ROI, not counts in the full spectrum.
livetime (float, optional): Amount of years of data taking.
Default is 5 years.
Raises:
ValueError: If :obj:`livetime` is not positive and non-zero.
Returns:
float: Isotope's :math:`0\\nu2\\beta` half-life in years.
"""
if n_atoms is None: # Calculate n_atoms from class variables
n_atoms = self.get_n_atoms()
if livetime <= 0.:
raise ValueError("Livetime should be positive and non zero")
activity = self.counts_to_activity(counts, roi_cut, livetime)
return self.activity_to_half_life(activity, n_atoms)
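# Hedged usage sketch (not executed here; the numerical inputs are the same
# Te-130 values used in test() below and the methods are those defined on
# DBIsotope above):
#
#   te130 = DBIsotope("Te130", 129.906229, 127.6, 0.3408,
#                     phase_space=3.69e-14, matrix_element=4.03)
#   t_half = te130.eff_mass_to_half_life(0.1)    # eV -> years
#   counts = te130.half_life_to_counts(t_half)   # years -> ROI counts (5 y)
#   eff_mass = te130.counts_to_eff_mass(counts)  # round trip, ~0.1 eV again
#
# Each conversion is a composition of the activity/half-life/effective-mass
# relations implemented above, so the round trip should reproduce its input.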
def test(args):
""" Test function to show agreement with Andy's numbers.
Args:
args (dict): Command line arguments from :mod:`argparse`
"""
# Load signal
signal = store.load(args.signal)
# Cut to 3.5m FV and 5 year livetime
signal.shrink(0.0, 10.0, 0.0, 3500.0, 0.0, 5.0)
# Shrink to ROI
signal.shrink_to_roi(2.46, 2.68, 0) # ROI used by Andy
print "============"
print "decay module"
print "------------"
# Check results of each function
# Create instance of DBIsotope for Te130
Te130_atm_weight = 129.906229 # SNO+-doc-1728v2
TeNat_atm_weight = 127.6 # SNO+-doc-1728v2
Te130_abundance = 0.3408 # SNO+-doc-1728v2
phase_space = 3.69e-14 # PRC 85, 034316 (2012)
matrix_element = 4.03 # IBM-2 PRC 87, 014315 (2013)
if args.roi_efficiency:
te130_converter = DBIsotope("Te130", Te130_atm_weight,
TeNat_atm_weight, Te130_abundance,
phase_space, matrix_element,
                                    roi_efficiency=signal.get_roi(0).get(
                                        "efficiency"))
else:
te130_converter = DBIsotope("Te130", Te130_atm_weight,
TeNat_atm_weight, Te130_abundance,
phase_space, matrix_element)
# Check get_n_atoms for 0.3% loading, no FV cut
expected = 3.7573e27 # SNO+-doc-1728v2
result, message = physics_tests.test_function_float(
te130_converter.get_n_atoms, expected, fv_radius=const._av_radius)
print message, "(no FV cut)"
# Check get_n_atoms with SNO+ defaults
# calculated - A Back 2015-02-25, based on SNO+-doc-1728v2
expected = 7.4694e26
result, message = physics_tests.test_function_float(
te130_converter.get_n_atoms, expected)
print message
# Create a DBIsotope instance for KLZ
# Molar Mass Calculator, http://www.webqc.org/mmcalc.php, 2015-05-07
Xe136_atm_weight = 135.907219
# Molar Mass Calculator, http://www.webqc.org/mmcalc.php, 2015-06-03
Xe134_atm_weight = 133.90539450
# We want the atomic weight of the enriched Xenon
XeEn_atm_weight = 0.9093*Xe136_atm_weight + 0.0889*Xe134_atm_weight
Xe136_abundance = 0.9093 # PRC 86, 021601 (2012)
phase_space = 1433.0e-17 # PRC 85, 034316 (2012)
matrix_element = 3.33 # IBM-2 PRC 87, 014315 (2013)
fv_radius = 1200. # mm, PRC 86, 021601 (2012)
loading = 0.0244 # 2.44%, PRC 86, 021601 (2012)
scint_density = 756.28e-9 # kg/mm^3 calculated A Back 2015-07-22
outer_radius = 1540. # mm, PRC 86, 021601 (2012)
xe136_converter = DBIsotope("Xe136", Xe136_atm_weight, XeEn_atm_weight,
Xe136_abundance, phase_space, matrix_element,
loading, fv_radius, outer_radius,
scint_density)
expected = 5.3985e+26 # Calculated - A Back 2015-06-30
result, message = physics_tests.test_function_float(
xe136_converter.get_n_atoms, expected,
fv_radius=fv_radius, loading=loading,
scint_density=scint_density, outer_radius=outer_radius)
print message, "(KamLAND-Zen)"
# Check half_life_to_activity
expected = 50.4 # /y, SNO+-doc-2593v8
half_life = 5.17e25 # y, SNO+-doc-2593v8 (3 sigma FC limit @ 5 y livetime)
result, message = physics_tests.test_function_float(
te130_converter.half_life_to_activity, expected, half_life=half_life,
n_atoms=te130_converter.get_n_atoms(fv_radius=const._av_radius))
print message, "(no FV cut)"
# Check activity_to_half_life
expected = 5.17e25 # y, SNO+-doc-2593v8
activity = 50.4 # /y, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.activity_to_half_life, expected, activity=activity,
n_atoms=te130_converter.get_n_atoms(fv_radius=const._av_radius))
print message, "(no FV cut)"
# Check eff_mass_to_half_life
expected = 4.37e25 # y, SNO+-doc-2593v8 (90% CL @ 1 y livetime)
eff_mass = 0.0999 # eV, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.eff_mass_to_half_life, expected, eff_mass=eff_mass)
print message
# Check half_life_to_eff_mass
expected = 0.0999 # eV, SNO+-doc-2593v8
half_life = 4.37e25 # y, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.half_life_to_eff_mass, expected, half_life=half_life)
print message
# Check activity_to_counts
livetime = 5.0
# ROI counts, SNO+-doc-2593v8 (3 sigma FC limit @ 5 y livetime)
expected = 31.2
# /y SNO+-doc-2593v8 - adjusted to FV
activity = 50.4 * (const._fv_radius**3/const._av_radius**3)
result, message = physics_tests.test_function_float(
te130_converter.activity_to_counts, expected, activity=activity,
livetime=livetime, roi_cut=True)
print message
# Check counts_to_activity
# /y SNO+-doc-2593v8 - adjusted to FV
expected = 50.4 * (const._fv_radius**3/const._av_radius**3)
counts = 31.2 # ROI counts, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.counts_to_activity, expected, counts=counts,
livetime=livetime, roi_cut=True)
print message
# Check counts_to_eff_mass
# eV, SNO+-doc-2593v8 (3 sigma @ 5 y livetime)
expected = te130_converter.half_life_to_eff_mass(5.17e25)
counts = 31.2 # ROI counts, SNO+-doc-2593v8 (3 sigma CL @ 5 y livetime)
result, message = physics_tests.test_function_float(
te130_converter.counts_to_eff_mass,
expected, counts=counts, roi_cut=True)
print message
# Check eff_mass_to_counts
expected = 31.2 # ROI counts, SNO+-doc-2593v8 (3 sigma CL @ 5 y livetime)
# eV, SNO+-doc-2593v8 (3 sigma @ 5 y livetime)
eff_mass = te130_converter.half_life_to_eff_mass(5.17e25)
result, message = physics_tests.test_function_float(
te130_converter.eff_mass_to_counts,
expected, eff_mass=eff_mass, roi_cut=True)
print message
# Check half_life_to_counts
expected = 31.2 # ROI counts, SNO+-doc-2593v8
half_life = 5.17e25 # y, SNO+-doc-2593v8 (3 sigma @ 5 y livetime)
result, message = physics_tests.test_function_float(
te130_converter.half_life_to_counts,
expected, half_life=half_life, roi_cut=True)
print message
# Check counts_to_half_life
expected = 5.17e25 # y, SNO+-doc-2593v8
counts = 31.2 # ROI counts, SNO+-doc-2593v8
result, message = physics_tests.test_function_float(
te130_converter.counts_to_half_life,
expected, counts=counts, roi_cut=True)
print message
print "============"
if __name__ == "__main__":
import argparse
from echidna.scripts.zero_nu_limit import ReadableDir
import echidna.output.store as store
    parser = argparse.ArgumentParser(description="Example DBIsotope calculator "
"script and validation.")
parser.add_argument("-s", "--signal", action=ReadableDir,
help="Supply path for signal hdf5 file")
parser.add_argument("-e", "--roi_efficiency", action="store_true",
help="If ROI efficiency should be calculated to "
"override default value")
args = parser.parse_args()
test(args)
| mit |
bmotlaghFLT/FLT_PhantomJS | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py | 118 | 5561 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Integration tests for the new-run-webkit-httpd and new-run-webkit-websocketserver scripts"""
# FIXME: Rename this file to something more descriptive.
import errno
import os
import socket
import subprocess
import sys
import tempfile
import unittest2 as unittest
class BaseTest(unittest.TestCase):
"""Basic framework for script tests."""
HOST = 'localhost'
# Override in actual test classes.
PORTS = None
SCRIPT_NAME = None
def assert_servers_are_down(self, ports=None):
ports = ports or self.PORTS
for port in ports:
try:
test_socket = socket.socket()
test_socket.connect((self.HOST, port))
self.fail()
except IOError, e:
self.assertTrue(e.errno in (errno.ECONNREFUSED, errno.ECONNRESET))
finally:
test_socket.close()
def assert_servers_are_up(self, ports=None):
ports = ports or self.PORTS
for port in ports:
try:
test_socket = socket.socket()
test_socket.connect((self.HOST, port))
except IOError, e:
self.fail('failed to connect to %s:%d' % (self.HOST, port))
finally:
test_socket.close()
def run_script(self, args):
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
script_path = os.path.join(script_dir, self.SCRIPT_NAME)
return subprocess.call([sys.executable, script_path] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def integration_test_server__normal(self):
if not self.SCRIPT_NAME:
return
self.assert_servers_are_down()
self.assertEqual(self.run_script(['--server', 'start']), 0)
self.assert_servers_are_up()
self.assertEqual(self.run_script(['--server', 'stop']), 0)
self.assert_servers_are_down()
def integration_test_server__fails(self):
if not self.SCRIPT_NAME:
return
# Test that if a port isn't available, the call fails.
for port_number in self.PORTS:
test_socket = socket.socket()
try:
try:
test_socket.bind((self.HOST, port_number))
except socket.error, e:
if e.errno in (errno.EADDRINUSE, errno.EALREADY):
self.fail('could not bind to port %d: %s' % (port_number, str(e)))
raise
self.assertEqual(self.run_script(['--server', 'start']), 1)
finally:
self.run_script(['--server', 'stop'])
test_socket.close()
# Test that calling stop() twice is harmless.
self.assertEqual(self.run_script(['--server', 'stop']), 0)
def maybe_make_dir(self, *comps):
try:
os.makedirs(os.path.join(*comps))
except OSError, e:
if e.errno != errno.EEXIST:
raise
def integration_test_port_and_root(self):
if not self.SCRIPT_NAME:
return
tmpdir = tempfile.mkdtemp(prefix='webkitpytest')
self.maybe_make_dir(tmpdir, 'http', 'tests', 'websocket')
self.maybe_make_dir(tmpdir, 'fast', 'js', 'resources')
self.maybe_make_dir(tmpdir, 'media')
self.assert_servers_are_down([18000])
self.assertEqual(self.run_script(['--server', 'start', '--port=18000', '--root', tmpdir]), 0)
self.assert_servers_are_up([18000])
self.assertEqual(self.run_script(['--server', 'stop']), 0)
self.assert_servers_are_down([18000])
class HTTPServerTest(BaseTest):
"""Tests that new-run-webkit-http must pass."""
PORTS = (8000, 8080, 8443)
SCRIPT_NAME = 'new-run-webkit-httpd'
class WebsocketserverTest(BaseTest):
"""Tests that new-run-webkit-websocketserver must pass."""
# FIXME: test TLS at some point?
PORTS = (8880, )
SCRIPT_NAME = 'new-run-webkit-websocketserver'
| bsd-3-clause |
dkodnik/Ant | openerp/tools/which.py | 456 | 6884 | #!/usr/bin/env python
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
* see http://bugs.python.org/issue444582
* uses ``PATHEXT`` on Windows
* searches current directory before ``PATH`` on Windows,
but not before an explicitly passed path
* accepts either a string or an iterable for an explicitly passed path, or pathext
* accepts an explicitly passed empty path, or pathext (either '' or [])
* does not search ``PATH`` for files that have a path specified in their name already
* moved defpath and defpathext lists initialization to module level,
instead of initializing them on each function call
* changed interface: which_files() returns generator, which() returns first match,
or raises IOError(errno.ENOENT)
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return a generator which yields full paths in which the *file* name exists
in a directory that is part of the file name, or on *path*,
and has the given *mode*.
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
existing executable file.
The *path* is, by default, the ``PATH`` variable on the platform,
or the string/iterable passed in as *path*.
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
On Windows, a current directory is searched before using the ``PATH`` variable,
but not before an explicitly passed *path*.
The *pathext* is only used on Windows to match files with given extensions appended as well.
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
In the event that a ``PATHEXT`` variable is not found,
default value for Windows XP/Vista is used.
The command is always searched without extension first,
even when *pathext* is explicitly passed.
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return first match generated by which_files(file, mode, path, pathext),
or raise IOError(errno.ENOENT).
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()
import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join
windows = sys.platform.startswith('win')
defpath = environ.get('PATH', defpath).split(pathsep)
if windows:
defpath.insert(0, '.') # can insert without checking, when duplicates are removed
# given the quite usual mess in PATH on Windows, let's rather remove duplicates
seen = set()
defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
del seen
defpathext = [''] + environ.get('PATHEXT',
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
defpathext = ['']
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function yields full paths (not necessarily absolute paths),
in which the given file name matches an existing file in a directory on the path.
>>> def test_which(expected, *args, **argd):
... result = list(which_files(*args, **argd))
... assert result == expected, 'which_files: %s != %s' % (result, expected)
...
... try:
... result = [ which(*args, **argd) ]
... except IOError:
... result = []
... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])
>>> if windows: cmd = environ['COMSPEC']
>>> if windows: test_which([cmd], 'cmd')
>>> if windows: test_which([cmd], 'cmd.exe')
>>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
>>> if windows: test_which([cmd], 'cmd', pathext='.exe')
>>> if windows: test_which([cmd], cmd)
>>> if windows: test_which([cmd], cmd, path='<nonexistent>')
>>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
>>> if windows: test_which([cmd], cmd[:-4])
>>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')
>>> if windows: test_which([], 'cmd', path='<nonexistent>')
>>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
>>> if windows: test_which([], '<nonexistent>/cmd')
>>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')
>>> if not windows: sh = '/bin/sh'
>>> if not windows: test_which([sh], 'sh')
>>> if not windows: test_which([sh], 'sh', path=dirname(sh))
>>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
>>> if not windows: test_which([sh], sh)
>>> if not windows: test_which([sh], sh, path='<nonexistent>')
>>> if not windows: test_which([sh], sh, pathext='<nonexistent>')
>>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you?
>>> if not windows: test_which([], 'sh', path='<nonexistent>')
>>> if not windows: test_which([], '<nonexistent>/sh')
"""
filepath, file = split(file)
if filepath:
path = (filepath,)
elif path is None:
path = defpath
elif isinstance(path, str):
path = path.split(pathsep)
if pathext is None:
pathext = defpathext
elif isinstance(pathext, str):
pathext = pathext.split(pathsep)
if not '' in pathext:
pathext.insert(0, '') # always check command without extension, even for custom pathext
for dir in path:
basepath = join(dir, file)
for ext in pathext:
fullpath = basepath + ext
if exists(fullpath) and access(fullpath, mode):
yield fullpath
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function returns full path (not necessarily absolute path),
in which the given file name matches an existing file in a directory on the path,
or raises IOError(errno.ENOENT).
>>> # for doctest see which_files()
"""
try:
return iter(which_files(file, mode, path, pathext)).next()
except StopIteration:
try:
from errno import ENOENT
except ImportError:
ENOENT = 2
raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file)
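# Hedged usage sketch (illustrative only; the doctests in which_files() are the
# authoritative examples, and the import path below is assumed from this file's
# location in openerp/tools):
#
#   from openerp.tools.which import which, which_files
#   try:
#       pg_dump = which('pg_dump')              # first executable match
#   except IOError:
#       pg_dump = None                          # command not found
#   candidates = list(which_files('python'))    # all matches, in search order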
if __name__ == '__main__':
import doctest
doctest.testmod()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
richardcs/ansible | lib/ansible/modules/network/fortimanager/fmgr_device.py | 7 | 9564 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"
}
DOCUMENTATION = '''
---
module: fmgr_device
version_added: "2.8"
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Add or remove device
description:
- Add or remove a device or list of devices from FortiManager Device Manager using JSON RPC API.
options:
adom:
description:
- The ADOM the configuration should belong to.
required: true
default: root
host:
description:
- The FortiManager's address.
required: true
username:
description:
- The username used to authenticate with the FortiManager.
required: false
password:
description:
- The password associated with the username account.
required: false
state:
description:
- The desired state of the specified object.
- absent will delete the object if it exists.
- present will create the configuration if needed.
required: false
default: present
choices: ["absent", "present"]
device_username:
description:
- The username of the device being added to FortiManager.
required: false
device_password:
description:
- The password of the device being added to FortiManager.
required: false
device_ip:
description:
- The IP of the device being added to FortiManager. Supports both IPv4 and IPv6.
required: false
device_unique_name:
description:
- The desired "friendly" name of the device being added to FortiManager.
required: false
device_serial:
description:
- The serial number of the device being added to FortiManager.
required: false
'''
EXAMPLES = '''
- name: DISCOVER AND ADD DEVICE FGT1
fmgr_device:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
device_username: "admin"
device_password: "admin"
device_ip: "10.10.24.201"
device_unique_name: "FGT1"
device_serial: "FGVM000000117994"
state: "present"
- name: DISCOVER AND ADD DEVICE FGT2
fmgr_device:
host: "{{inventory_hostname}}"
username: "{{ username }}"
password: "{{ password }}"
adom: "root"
device_username: "admin"
device_password: "admin"
device_ip: "10.10.24.202"
device_unique_name: "FGT2"
device_serial: "FGVM000000117992"
state: "absent"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: string
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.network.fortimanager.fortimanager import AnsibleFortiManager
# check for pyFMG lib
try:
from pyFMG.fortimgr import FortiManager
HAS_PYFMGR = True
except ImportError:
HAS_PYFMGR = False
def discover_device(fmg, paramgram):
"""
This method is used to discover devices before adding them to FMGR
"""
datagram = {
"odd_request_form": "True",
"device": {"adm_usr": paramgram["device_username"],
"adm_pass": paramgram["device_password"],
"ip": paramgram["device_ip"]}
}
url = '/dvm/cmd/discover/device/'
response = fmg.execute(url, datagram)
return response
def add_device(fmg, paramgram):
"""
This method is used to add devices to the FMGR
"""
datagram = {
"adom": paramgram["adom"],
"flags": ["create_task", "nonblocking"],
"odd_request_form": "True",
"device": {"adm_usr": paramgram["device_username"], "adm_pass": paramgram["device_password"],
"ip": paramgram["device_ip"], "name": paramgram["device_unique_name"],
"sn": paramgram["device_serial"], "mgmt_mode": "fmgfaz", "flags": 24}
}
url = '/dvm/cmd/add/device/'
response = fmg.execute(url, datagram)
return response
def delete_device(fmg, paramgram):
"""
This method deletes a device from the FMGR
"""
datagram = {
"adom": paramgram["adom"],
"flags": ["create_task", "nonblocking"],
"odd_request_form": "True",
"device": paramgram["device_unique_name"],
}
url = '/dvm/cmd/del/device/'
response = fmg.execute(url, datagram)
return response
# FUNCTION/METHOD FOR LOGGING OUT AND ANALYZING ERROR CODES
def fmgr_logout(fmg, module, msg="NULL", results=(), good_codes=(0,), logout_on_fail=True, logout_on_success=False):
"""
THIS METHOD CONTROLS THE LOGOUT AND ERROR REPORTING AFTER AN METHOD OR FUNCTION RUNS
"""
# VALIDATION ERROR (NO RESULTS, JUST AN EXIT)
if msg != "NULL" and len(results) == 0:
try:
fmg.logout()
except:
pass
module.fail_json(msg=msg)
# SUBMISSION ERROR
if len(results) > 0:
if msg == "NULL":
try:
msg = results[1]['status']['message']
except:
msg = "No status message returned from pyFMG. Possible that this was a GET with a tuple result."
if results[0] not in good_codes:
if logout_on_fail:
fmg.logout()
module.fail_json(msg=msg, **results[1])
else:
return_msg = msg + " -- LOGOUT ON FAIL IS OFF, MOVING ON"
return return_msg
else:
if logout_on_success:
fmg.logout()
module.exit_json(msg=msg, **results[1])
else:
return_msg = msg + " -- LOGOUT ON SUCCESS IS OFF, MOVING ON TO REST OF CODE"
return return_msg
def main():
argument_spec = dict(
adom=dict(required=False, type="str", default="root"),
host=dict(required=True, type="str"),
username=dict(fallback=(env_fallback, ["ANSIBLE_NET_USERNAME"])),
password=dict(fallback=(env_fallback, ["ANSIBLE_NET_PASSWORD"]), no_log=True),
state=dict(choices=["absent", "present"], type="str", default="present"),
device_ip=dict(required=False, type="str"),
device_username=dict(required=False, type="str"),
device_password=dict(required=False, type="str", no_log=True),
device_unique_name=dict(required=True, type="str"),
device_serial=dict(required=False, type="str")
)
module = AnsibleModule(argument_spec, supports_check_mode=True,)
    # handle params passed via provider and ensure they are represented as the data type expected by fortimanager
paramgram = {
"device_ip": module.params["device_ip"],
"device_username": module.params["device_username"],
"device_password": module.params["device_password"],
"device_unique_name": module.params["device_unique_name"],
"device_serial": module.params["device_serial"],
"adom": module.params["adom"],
"state": module.params["state"]
}
# validate required arguments are passed; not used in argument_spec to allow params to be called from provider
# check if params are set
if module.params["host"] is None or module.params["username"] is None or module.params["password"] is None:
module.fail_json(msg="Host and username are required for connection")
# CHECK IF LOGIN FAILED
fmg = AnsibleFortiManager(module, module.params["host"], module.params["username"], module.params["password"])
response = fmg.login()
if response[1]['status']['code'] != 0:
module.fail_json(msg="Connection to FortiManager Failed")
else:
# START SESSION LOGIC
results = (-100000, {"msg": "Nothing Happened."})
if paramgram["state"] == "present":
# add device
results = discover_device(fmg, paramgram)
if results[0] != 0:
if results[0] == -20042:
fmgr_logout(fmg, module, msg="Couldn't contact device on network", results=results, good_codes=[0])
else:
fmgr_logout(fmg, module, msg="Discovering Device Failed", results=results, good_codes=[0])
if results[0] == 0:
results = add_device(fmg, paramgram)
if results[0] != 0 and results[0] != -20010:
fmgr_logout(fmg, module, msg="Adding Device Failed", results=results, good_codes=[0])
if paramgram["state"] == "absent":
# remove device
results = delete_device(fmg, paramgram)
if results[0] != 0:
fmgr_logout(fmg, module, msg="Deleting Device Failed", results=results, good_codes=[0])
fmg.logout()
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
| gpl-3.0 |
illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_0_96/django/core/cache/backends/db.py | 33 | 3522 | "Database cache backend."
from django.core.cache.backends.base import BaseCache
from django.db import connection, transaction, DatabaseError
import base64, time
from datetime import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
class CacheClass(BaseCache):
def __init__(self, table, params):
BaseCache.__init__(self, params)
self._table = table
max_entries = params.get('max_entries', 300)
try:
self._max_entries = int(max_entries)
except (ValueError, TypeError):
self._max_entries = 300
cull_frequency = params.get('cull_frequency', 3)
try:
self._cull_frequency = int(cull_frequency)
except (ValueError, TypeError):
self._cull_frequency = 3
def get(self, key, default=None):
cursor = connection.cursor()
cursor.execute("SELECT cache_key, value, expires FROM %s WHERE cache_key = %%s" % self._table, [key])
row = cursor.fetchone()
if row is None:
return default
now = datetime.now()
if row[2] < now:
cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % self._table, [key])
transaction.commit_unless_managed()
return default
return pickle.loads(base64.decodestring(row[1]))
def set(self, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
cursor = connection.cursor()
cursor.execute("SELECT COUNT(*) FROM %s" % self._table)
num = cursor.fetchone()[0]
now = datetime.now().replace(microsecond=0)
exp = datetime.fromtimestamp(time.time() + timeout).replace(microsecond=0)
if num > self._max_entries:
self._cull(cursor, now)
encoded = base64.encodestring(pickle.dumps(value, 2)).strip()
cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s" % self._table, [key])
try:
if cursor.fetchone():
cursor.execute("UPDATE %s SET value = %%s, expires = %%s WHERE cache_key = %%s" % self._table, [encoded, str(exp), key])
else:
cursor.execute("INSERT INTO %s (cache_key, value, expires) VALUES (%%s, %%s, %%s)" % self._table, [key, encoded, str(exp)])
except DatabaseError:
# To be threadsafe, updates/inserts are allowed to fail silently
pass
else:
transaction.commit_unless_managed()
def delete(self, key):
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % self._table, [key])
transaction.commit_unless_managed()
def has_key(self, key):
cursor = connection.cursor()
cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s" % self._table, [key])
return cursor.fetchone() is not None
def _cull(self, cursor, now):
if self._cull_frequency == 0:
cursor.execute("DELETE FROM %s" % self._table)
else:
cursor.execute("DELETE FROM %s WHERE expires < %%s" % self._table, [str(now)])
cursor.execute("SELECT COUNT(*) FROM %s" % self._table)
num = cursor.fetchone()[0]
if num > self._max_entries:
cursor.execute("SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" % self._table, [num / self._cull_frequency])
cursor.execute("DELETE FROM %s WHERE cache_key < %%s" % self._table, [cursor.fetchone()[0]])
| apache-2.0 |
alangwansui/mtl_ordercenter | openerp/addons/account_payment/__init__.py | 436 | 1279 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import account_payment
import wizard
import account_move_line
import account_invoice
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
NejcZupec/ggrc-core | test/integration/ggrc_workflows/converters/test_workflow_export_csv.py | 1 | 11972 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for workflow object exports."""
from os.path import abspath, dirname, join
from flask.json import dumps
from ggrc.app import app
from ggrc_workflows.models import Workflow
from integration.ggrc import TestCase
from integration.ggrc_workflows.generator import WorkflowsGenerator
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'test_csvs/')
class TestExportEmptyTemplate(TestCase):
"""Test empty export for all workflow object types."""
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
def test_single_object_export(self):
"""Test empty exports for workflow only."""
data = [{"object_name": "Workflow", "fields": "all"}]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
def test_multiple_objects(self):
"""Test empty exports for all workflow object in one query."""
data = [
{"object_name": "Workflow", "fields": "all"},
{"object_name": "TaskGroup", "fields": "all"},
{"object_name": "TaskGroupTask", "fields": "all"},
{"object_name": "Cycle", "fields": "all"},
{"object_name": "CycleTaskGroup", "fields": "all"},
{"object_name": "CycleTaskGroupObjectTask", "fields": "all"},
]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Workflow,", response.data)
self.assertIn("Task Group,", response.data)
self.assertIn("Task,", response.data)
self.assertIn("Cycle,", response.data)
self.assertIn("Cycle Task Group,", response.data)
self.assertIn("Cycle Task Group Object Task,", response.data)
class TestExportMultipleObjects(TestCase):
""" Test data is found in the google sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=2035742544
"""
@classmethod
def setUpClass(cls): # pylint: disable=C0103
TestCase.clear_data()
cls.tc = app.test_client()
cls.tc.get("/login")
cls.import_file("workflow_big_sheet.csv")
@classmethod
def import_file(cls, filename, dry_run=False):
data = {"file": (open(join(CSV_DIR, filename)), filename)}
headers = {
"X-test-only": "true" if dry_run else "false",
"X-requested-by": "gGRC",
}
cls.tc.post("/_service/import_csv",
data=data, headers=headers)
def activate(self):
""" activate workflows just once after the class has been initialized
This should be in setUpClass method, but we can't access the server
context from there."""
gen = WorkflowsGenerator()
# generate cycle for the only one time wf
wf1 = Workflow.query.filter_by(status="Draft", slug="wf-1").first()
if wf1:
gen.generate_cycle(wf1)
workflows = Workflow.query.filter_by(status="Draft").all()
for wf in workflows:
gen.activate_workflow(wf)
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
self.activate()
def export_csv(self, data):
response = self.client.post("/_service/export_csv", data=dumps(data),
headers=self.headers)
self.assert200(response)
return response
def test_workflow_task_group_mapping(self):
""" test workflow and task group mappings """
data = [
{
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 1 for wf and 1 on each tg
self.assertIn("tg-1", response)
self.assertIn("tg-6", response)
def test_tg_task(self):
""" test task group and task mappings """
data = [
{
"object_name": "TaskGroupTask", # task-1, task-7
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("tg-1")) # 2 for tasks and 1 for tg
self.assertIn("task-1", response)
self.assertIn("task-7", response)
def test_workflow_cycle_mapping(self):
""" test workflow and cycle mappings """
data = [
{
"object_name": "Cycle", # cycle with title wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Workflow",
"slugs": ["wf-1"],
},
},
"fields": "all",
}, {
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "Cycle", # sholud be same cycle as in first block
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
# Task mapped to any of the two task groups, 3 tasks
"object_name": "CycleTaskGroupObjectTask",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["4"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 2 for cycles and 1 for wf
# 3rd block = 2, 5th block = 3, 6th block = 2.
self.assertEqual(7, response.count("CYCLEGROUP-"))
self.assertEqual(9, response.count("CYCLE-"))
self.assertEqual(3, response.count("CYCLETASK-"))
def test_cycle_taks_objects(self):
""" test cycle task and various objects """
data = [
{
"object_name": "CycleTaskGroupObjectTask", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Policy",
"slugs": ["p1"],
},
},
"fields": "all",
}, {
"object_name": "Policy", #
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": ["slug", "title"],
},
]
response = self.export_csv(data).data
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(3, response.count(",p1,"))
def test_workflow_no_access_users(self):
""" test export of No Access users """
data = [
{
"object_name": "Workflow",
"fields": ["workflow_mapped"],
"filters": {
"expression": {}
}
}
]
response = self.export_csv(data).data
users = response.splitlines()[2:-2]
expected = [",[email protected]"] * 10
self.assertEqual(expected, users)
def test_wf_indirect_relevant_filters(self):
""" test related filter for indirect relationships on wf objects """
def block(obj):
return {
"object_name": obj,
"fields": ["slug"],
"filters": {
"expression": {
"object_name": "Policy",
"op": {"name": "relevant"},
"slugs": ["p1"],
},
},
}
data = [
block("Workflow"),
block("Cycle"),
block("CycleTaskGroup"),
block("CycleTaskGroupObjectTask"),
]
response = self.export_csv(data).data
wf = Workflow.query.filter_by(slug="wf-1").first()
cycle = wf.cycles[0]
cycle_tasks = []
for cycle_task in cycle.cycle_task_group_object_tasks:
is_related = False
for related_object in cycle_task.related_objects:
if related_object.slug == "p1":
is_related = True
if is_related:
cycle_tasks.append(cycle_task)
cycle_task_groups = list({cycle_task.cycle_task_group
for cycle_task in cycle_tasks})
self.assertEqual(1, response.count("wf-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(wf.slug))
self.assertEqual(1, response.count("CYCLE-"))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(cycle.slug))
self.assertEqual(1, response.count("CYCLEGROUP-"))
self.assertEqual(1, len(cycle_task_groups))
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task_groups[0].slug))
self.assertEqual(2, response.count("CYCLETASK-"))
self.assertEqual(2, len(cycle_tasks))
for cycle_task in cycle_tasks:
self.assertRegexpMatches(response, ",{}[,\r\n]".format(
cycle_task.slug))
destinations = [
("Workflow", wf.slug, 3),
("Cycle", cycle.slug, 3),
("CycleTaskGroupObjectTask", cycle_tasks[0].slug, 1),
("CycleTaskGroupObjectTask", cycle_tasks[1].slug, 1),
]
for object_name, slug, count in destinations:
data = [{
"object_name": "Policy",
"fields": ["slug"],
"filters": {
"expression": {
"object_name": object_name,
"op": {"name": "relevant"},
"slugs": [slug],
},
},
}]
response = self.export_csv(data).data
self.assertEqual(count, response.count(",p"), "Count for " + object_name)
self.assertIn(",p1", response)
| apache-2.0 |
michaelbuehlmann/CatAna | setup.py | 1 | 3178 | import os
import re
import sys
import subprocess
import platform
import versioneer
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >=3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j8']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
def get_cmdclass():
cmdclass = versioneer.get_cmdclass()
cmdclass.update({"build_ext": CMakeBuild})
return cmdclass
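# Hedged build sketch: with get_cmdclass() wiring CMakeBuild into setup() below,
# an ordinary install drives CMake for every CMakeExtension in ext_modules, e.g.
#
#   pip install .                        # configure + build via CMakeBuild
#   python setup.py build_ext --inplace  # build the extensions in-place
#
# (command lines are illustrative; no extra setup.py options are added here).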
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='catana',
description='A library for Spherical Fourier Bessel (SFB) Decomposition',
long_description=readme(),
license='GPLv2',
author='Michael Buehlmann',
author_email='[email protected]',
url='https://github.com/michaelbuehlmann/CatAna',
keywords='SFB spherical Fourier Bessel transformation decomposition cosmology',
version=versioneer.get_version(),
ext_package='catana',
packages=['catana'],
package_dir = {'': 'python'},
ext_modules=[CMakeExtension('basictypes'), CMakeExtension('besseltools'), CMakeExtension('decomposition'), CMakeExtension('io')],
cmdclass=get_cmdclass(),
zip_safe=False,
install_requires=['numpy'],
setup_requires=['pytest-runner'],
tests_require=['pytest']
) | gpl-2.0 |
mandeepdhami/neutron | neutron/tests/unit/db/test_db_base_plugin_v2.py | 1 | 279583 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import itertools
import mock
import netaddr
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils
import six
from sqlalchemy import orm
from testtools import matchers
import webob.exc
import neutron
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import router
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.common import test_lib
from neutron.common import utils
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import ipam_non_pluggable_backend as non_ipam
from neutron.db import models_v2
from neutron import manager
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit import testlib_api
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
DEVICE_OWNER_COMPUTE = 'compute:None'
DEVICE_OWNER_NOT_COMPUTE = constants.DEVICE_OWNER_DHCP
def optional_ctx(obj, fallback):
if not obj:
return fallback()
@contextlib.contextmanager
def context_wrapper():
yield obj
return context_wrapper()
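# Hedged usage sketch for optional_ctx (names are illustrative): test helpers
# that accept an optional pre-built resource can treat "reuse the given object"
# and "create one just for this block" uniformly, e.g.
#
#   with optional_ctx(subnet, self.subnet) as subnet_to_use:
#       ...  # subnet_to_use is either the passed-in subnet or a fresh one,
#            # where self.subnet is assumed to be a context-manager factory
#            # defined on the test case.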
def _fake_get_pagination_helper(self, request):
return api_common.PaginationEmulatedHelper(request, self._primary_key)
def _fake_get_sorting_helper(self, request):
return api_common.SortingEmulatedHelper(request, self._attr_info)
# TODO(banix): Move the following method to ML2 db test module when ML2
# mechanism driver unit tests are corrected to use Ml2PluginV2TestCase
# instead of directly using NeutronDbPluginV2TestCase
def _get_create_db_method(resource):
ml2_method = '_create_%s_db' % resource
if hasattr(manager.NeutronManager.get_plugin(), ml2_method):
return ml2_method
else:
return 'create_%s' % resource
class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
fmt = 'json'
resource_prefix_map = {}
def setUp(self, plugin=None, service_plugins=None,
ext_mgr=None):
super(NeutronDbPluginV2TestCase, self).setUp()
cfg.CONF.set_override('notify_nova_on_port_status_changes', False)
cfg.CONF.set_override('allow_overlapping_ips', True)
        # Make sure the plugin's required extensions are loaded for each test
extensions.PluginAwareExtensionManager._instance = None
        # Save the attributes map in case the plugin alters it when
        # loading extensions
self.useFixture(tools.AttributeMapMemento())
self._tenant_id = 'test-tenant'
if not plugin:
plugin = DB_PLUGIN_KLASS
# Update the plugin
self.setup_coreplugin(plugin)
cfg.CONF.set_override(
'service_plugins',
[test_lib.test_config.get(key, default)
for key, default in six.iteritems(service_plugins or {})]
)
cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
cfg.CONF.set_override('max_dns_nameservers', 2)
cfg.CONF.set_override('max_subnet_host_routes', 2)
cfg.CONF.set_override('allow_pagination', True)
cfg.CONF.set_override('allow_sorting', True)
self.api = router.APIRouter()
        # Set the default status
self.net_create_status = 'ACTIVE'
self.port_create_status = 'ACTIVE'
def _is_native_bulk_supported():
plugin_obj = manager.NeutronManager.get_plugin()
native_bulk_attr_name = ("_%s__native_bulk_support"
% plugin_obj.__class__.__name__)
return getattr(plugin_obj, native_bulk_attr_name, False)
self._skip_native_bulk = not _is_native_bulk_supported()
def _is_native_pagination_support():
native_pagination_attr_name = (
"_%s__native_pagination_support" %
manager.NeutronManager.get_plugin().__class__.__name__)
return (cfg.CONF.allow_pagination and
getattr(manager.NeutronManager.get_plugin(),
native_pagination_attr_name, False))
self._skip_native_pagination = not _is_native_pagination_support()
def _is_native_sorting_support():
native_sorting_attr_name = (
"_%s__native_sorting_support" %
manager.NeutronManager.get_plugin().__class__.__name__)
return (cfg.CONF.allow_sorting and
getattr(manager.NeutronManager.get_plugin(),
native_sorting_attr_name, False))
self.plugin = manager.NeutronManager.get_plugin()
self._skip_native_sorting = not _is_native_sorting_support()
if ext_mgr:
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def tearDown(self):
self.api = None
self._deserializers = None
self._skip_native_bulk = None
self._skip_native_pagination = None
        self._skip_native_sorting = None
self.ext_api = None
super(NeutronDbPluginV2TestCase, self).tearDown()
def setup_config(self):
# Create the default configurations
args = ['--config-file', base.etcdir('neutron.conf')]
# If test_config specifies some config-file, use it, as well
for config_file in test_lib.test_config.get('config_files', []):
args.extend(['--config-file', config_file])
super(NeutronDbPluginV2TestCase, self).setup_config(args=args)
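    # Builds a test API request for a path of the form
    # [<prefix>]/<resource>[/<id>[/<subresource>[/<sub_id>]]][/<action>].<fmt>,
    # serializing 'data' as the body when it is provided.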
def _req(self, method, resource, data=None, fmt=None, id=None, params=None,
action=None, subresource=None, sub_id=None, context=None):
fmt = fmt or self.fmt
path = '/%s.%s' % (
'/'.join(p for p in
(resource, id, subresource, sub_id, action) if p),
fmt
)
prefix = self.resource_prefix_map.get(resource)
if prefix:
path = prefix + path
content_type = 'application/%s' % fmt
body = None
if data is not None: # empty dict is valid
body = self.serialize(data)
return testlib_api.create_request(path, body, content_type, method,
query_string=params, context=context)
def new_create_request(self, resource, data, fmt=None, id=None,
subresource=None, context=None):
return self._req('POST', resource, data, fmt, id=id,
subresource=subresource, context=context)
def new_list_request(self, resource, fmt=None, params=None,
subresource=None):
return self._req(
'GET', resource, None, fmt, params=params, subresource=subresource
)
def new_show_request(self, resource, id, fmt=None,
subresource=None, fields=None):
if fields:
params = "&".join(["fields=%s" % x for x in fields])
else:
params = None
return self._req('GET', resource, None, fmt, id=id,
params=params, subresource=subresource)
def new_delete_request(self, resource, id, fmt=None, subresource=None,
sub_id=None):
return self._req(
'DELETE',
resource,
None,
fmt,
id=id,
subresource=subresource,
sub_id=sub_id
)
def new_update_request(self, resource, data, id, fmt=None,
subresource=None, context=None):
return self._req(
'PUT', resource, data, fmt, id=id, subresource=subresource,
context=context
)
def new_action_request(self, resource, data, id, action, fmt=None,
subresource=None):
return self._req(
'PUT',
resource,
data,
fmt,
id=id,
action=action,
subresource=subresource
)
def deserialize(self, content_type, response):
ctype = 'application/%s' % content_type
data = self._deserializers[ctype].deserialize(response.body)['body']
return data
def _create_bulk_from_list(self, fmt, resource, objects, **kwargs):
"""Creates a bulk request from a list of objects."""
collection = "%ss" % resource
req_data = {collection: objects}
req = self.new_create_request(collection, req_data, fmt)
if ('set_context' in kwargs and
kwargs['set_context'] is True and
'tenant_id' in kwargs):
# create a specific auth context for this request
req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
elif 'context' in kwargs:
req.environ['neutron.context'] = kwargs['context']
return req.get_response(self.api)
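    # Generates <number> copies of the resource body, names them '<name>_<i>',
    # applies any per-index 'override' kwargs, and posts them as a single bulk
    # request, e.g. {'ports': [{'port': {...}}, ...]}.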
def _create_bulk(self, fmt, number, resource, data, name='test', **kwargs):
"""Creates a bulk request for any kind of resource."""
objects = []
collection = "%ss" % resource
for i in range(number):
obj = copy.deepcopy(data)
obj[resource]['name'] = "%s_%s" % (name, i)
if 'override' in kwargs and i in kwargs['override']:
obj[resource].update(kwargs['override'][i])
objects.append(obj)
req_data = {collection: objects}
req = self.new_create_request(collection, req_data, fmt)
if ('set_context' in kwargs and
kwargs['set_context'] is True and
'tenant_id' in kwargs):
# create a specific auth context for this request
req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
elif 'context' in kwargs:
req.environ['neutron.context'] = kwargs['context']
return req.get_response(self.api)
def _create_network(self, fmt, name, admin_state_up,
arg_list=None, **kwargs):
data = {'network': {'name': name,
'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}}
for arg in (('admin_state_up', 'tenant_id', 'shared',
'vlan_transparent') + (arg_list or ())):
# Arg must be present
if arg in kwargs:
data['network'][arg] = kwargs[arg]
network_req = self.new_create_request('networks', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
network_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
return network_req.get_response(self.api)
def _create_network_bulk(self, fmt, number, name,
admin_state_up, **kwargs):
base_data = {'network': {'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}}
return self._create_bulk(fmt, number, 'network', base_data, **kwargs)
def _create_subnet(self, fmt, net_id, cidr,
expected_res_status=None, **kwargs):
data = {'subnet': {'network_id': net_id,
'cidr': cidr,
'ip_version': 4,
'tenant_id': self._tenant_id}}
for arg in ('ip_version', 'tenant_id',
'enable_dhcp', 'allocation_pools',
'dns_nameservers', 'host_routes',
'shared', 'ipv6_ra_mode', 'ipv6_address_mode'):
# Arg must be present and not null (but can be false)
if kwargs.get(arg) is not None:
data['subnet'][arg] = kwargs[arg]
if ('gateway_ip' in kwargs and
kwargs['gateway_ip'] is not attributes.ATTR_NOT_SPECIFIED):
data['subnet']['gateway_ip'] = kwargs['gateway_ip']
subnet_req = self.new_create_request('subnets', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
subnet_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
subnet_res = subnet_req.get_response(self.api)
if expected_res_status:
self.assertEqual(subnet_res.status_int, expected_res_status)
return subnet_res
def _create_subnet_bulk(self, fmt, number, net_id, name,
ip_version=4, **kwargs):
base_data = {'subnet': {'network_id': net_id,
'ip_version': ip_version,
'tenant_id': self._tenant_id}}
# auto-generate cidrs as they should not overlap
overrides = dict((k, v)
for (k, v) in zip(range(number),
[{'cidr': "10.0.%s.0/24" % num}
for num in range(number)]))
kwargs.update({'override': overrides})
return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs)
def _create_subnetpool(self, fmt, prefixes,
expected_res_status=None, admin=False, **kwargs):
subnetpool = {'subnetpool': {'prefixes': prefixes}}
for k, v in kwargs.items():
subnetpool['subnetpool'][k] = str(v)
api = self._api_for_resource('subnetpools')
subnetpools_req = self.new_create_request('subnetpools',
subnetpool, fmt)
if not admin:
neutron_context = context.Context('', kwargs['tenant_id'])
subnetpools_req.environ['neutron.context'] = neutron_context
subnetpool_res = subnetpools_req.get_response(api)
if expected_res_status:
self.assertEqual(subnetpool_res.status_int, expected_res_status)
return subnetpool_res
def _create_port(self, fmt, net_id, expected_res_status=None,
arg_list=None, **kwargs):
data = {'port': {'network_id': net_id,
'tenant_id': self._tenant_id}}
for arg in (('admin_state_up', 'device_id',
'mac_address', 'name', 'fixed_ips',
'tenant_id', 'device_owner', 'security_groups') +
(arg_list or ())):
# Arg must be present
if arg in kwargs:
data['port'][arg] = kwargs[arg]
# create a dhcp port device id if one hasn't been supplied
if ('device_owner' in kwargs and
kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
'host' in kwargs and
'device_id' not in kwargs):
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
data['port']['device_id'] = device_id
port_req = self.new_create_request('ports', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
port_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
port_res = port_req.get_response(self.api)
if expected_res_status:
self.assertEqual(port_res.status_int, expected_res_status)
return port_res
def _list_ports(self, fmt, expected_res_status=None,
net_id=None, **kwargs):
query_params = []
if net_id:
query_params.append("network_id=%s" % net_id)
if kwargs.get('device_owner'):
query_params.append("device_owner=%s" % kwargs.get('device_owner'))
port_req = self.new_list_request('ports', fmt, '&'.join(query_params))
if ('set_context' in kwargs and
kwargs['set_context'] is True and
'tenant_id' in kwargs):
# create a specific auth context for this request
port_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
port_res = port_req.get_response(self.api)
if expected_res_status:
self.assertEqual(port_res.status_int, expected_res_status)
return port_res
def _create_port_bulk(self, fmt, number, net_id, name,
admin_state_up, **kwargs):
base_data = {'port': {'network_id': net_id,
'admin_state_up': admin_state_up,
'tenant_id': self._tenant_id}}
return self._create_bulk(fmt, number, 'port', base_data, **kwargs)
def _make_network(self, fmt, name, admin_state_up, **kwargs):
res = self._create_network(fmt, name, admin_state_up, **kwargs)
# TODO(salvatore-orlando): do exception handling in this test module
        # in a uniform way (we do it differently for ports, subnets and nets)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_subnet(self, fmt, network, gateway, cidr,
allocation_pools=None, ip_version=4, enable_dhcp=True,
dns_nameservers=None, host_routes=None, shared=None,
ipv6_ra_mode=None, ipv6_address_mode=None,
tenant_id=None, set_context=False):
res = self._create_subnet(fmt,
net_id=network['network']['id'],
cidr=cidr,
gateway_ip=gateway,
tenant_id=(tenant_id or
network['network']['tenant_id']),
allocation_pools=allocation_pools,
ip_version=ip_version,
enable_dhcp=enable_dhcp,
dns_nameservers=dns_nameservers,
host_routes=host_routes,
shared=shared,
ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_address_mode,
set_context=set_context)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_subnetpool(self, fmt, prefixes, admin=False, **kwargs):
res = self._create_subnetpool(fmt,
prefixes,
None,
admin,
**kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _make_port(self, fmt, net_id, expected_res_status=None, **kwargs):
res = self._create_port(fmt, net_id, expected_res_status, **kwargs)
# Things can go wrong - raise HTTP exc with res code only
# so it can be caught by unit tests
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _api_for_resource(self, resource):
if resource in ['networks', 'subnets', 'ports', 'subnetpools']:
return self.api
else:
return self.ext_api
def _delete(self, collection, id,
expected_code=webob.exc.HTTPNoContent.code,
neutron_context=None):
req = self.new_delete_request(collection, id)
if neutron_context:
# create a specific auth context for this request
req.environ['neutron.context'] = neutron_context
res = req.get_response(self._api_for_resource(collection))
self.assertEqual(res.status_int, expected_code)
def _show_response(self, resource, id, neutron_context=None):
req = self.new_show_request(resource, id)
if neutron_context:
# create a specific auth context for this request
req.environ['neutron.context'] = neutron_context
return req.get_response(self._api_for_resource(resource))
def _show(self, resource, id,
expected_code=webob.exc.HTTPOk.code,
neutron_context=None):
res = self._show_response(resource, id,
neutron_context=neutron_context)
self.assertEqual(expected_code, res.status_int)
return self.deserialize(self.fmt, res)
def _update(self, resource, id, new_data,
expected_code=webob.exc.HTTPOk.code,
neutron_context=None):
req = self.new_update_request(resource, new_data, id)
if neutron_context:
# create a specific auth context for this request
req.environ['neutron.context'] = neutron_context
res = req.get_response(self._api_for_resource(resource))
self.assertEqual(res.status_int, expected_code)
return self.deserialize(self.fmt, res)
def _list(self, resource, fmt=None, neutron_context=None,
query_params=None):
fmt = fmt or self.fmt
req = self.new_list_request(resource, fmt, query_params)
if neutron_context:
req.environ['neutron.context'] = neutron_context
res = req.get_response(self._api_for_resource(resource))
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
return self.deserialize(fmt, res)
def _fail_second_call(self, patched_plugin, orig, *args, **kwargs):
"""Invoked by test cases for injecting failures in plugin."""
def second_call(*args, **kwargs):
raise n_exc.NeutronException()
patched_plugin.side_effect = second_call
return orig(*args, **kwargs)
def _validate_behavior_on_bulk_failure(
self, res, collection,
errcode=webob.exc.HTTPClientError.code):
self.assertEqual(res.status_int, errcode)
req = self.new_list_request(collection)
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
items = self.deserialize(self.fmt, res)
self.assertEqual(len(items[collection]), 0)
def _validate_behavior_on_bulk_success(self, res, collection,
names=['test_0', 'test_1']):
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
items = self.deserialize(self.fmt, res)[collection]
self.assertEqual(len(items), 2)
self.assertEqual(items[0]['name'], 'test_0')
self.assertEqual(items[1]['name'], 'test_1')
def _test_list_resources(self, resource, items, neutron_context=None,
query_params=None):
res = self._list('%ss' % resource,
neutron_context=neutron_context,
query_params=query_params)
resource = resource.replace('-', '_')
self.assertItemsEqual([i['id'] for i in res['%ss' % resource]],
[i[resource]['id'] for i in items])
@contextlib.contextmanager
def network(self, name='net1',
admin_state_up=True,
fmt=None,
**kwargs):
network = self._make_network(fmt or self.fmt, name,
admin_state_up, **kwargs)
yield network
@contextlib.contextmanager
def subnet(self, network=None,
gateway_ip=attributes.ATTR_NOT_SPECIFIED,
cidr='10.0.0.0/24',
fmt=None,
ip_version=4,
allocation_pools=None,
enable_dhcp=True,
dns_nameservers=None,
host_routes=None,
shared=None,
ipv6_ra_mode=None,
ipv6_address_mode=None,
tenant_id=None,
set_context=False):
with optional_ctx(network, self.network) as network_to_use:
subnet = self._make_subnet(fmt or self.fmt,
network_to_use,
gateway_ip,
cidr,
allocation_pools,
ip_version,
enable_dhcp,
dns_nameservers,
host_routes,
shared=shared,
ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_address_mode,
tenant_id=tenant_id,
set_context=set_context)
yield subnet
@contextlib.contextmanager
def subnetpool(self, prefixes, admin=False, **kwargs):
subnetpool = self._make_subnetpool(self.fmt,
prefixes,
admin,
**kwargs)
yield subnetpool
@contextlib.contextmanager
def port(self, subnet=None, fmt=None, **kwargs):
with optional_ctx(subnet, self.subnet) as subnet_to_use:
net_id = subnet_to_use['subnet']['network_id']
port = self._make_port(fmt or self.fmt, net_id, **kwargs)
yield port
def _test_list_with_sort(self, resource,
items, sorts, resources=None, query_params=''):
query_str = query_params
for key, direction in sorts:
query_str = query_str + "&sort_key=%s&sort_dir=%s" % (key,
direction)
if not resources:
resources = '%ss' % resource
req = self.new_list_request(resources,
params=query_str)
api = self._api_for_resource(resources)
res = self.deserialize(self.fmt, req.get_response(api))
resource = resource.replace('-', '_')
resources = resources.replace('-', '_')
expected_res = [item[resource]['id'] for item in items]
self.assertEqual(expected_res, [n['id'] for n in res[resources]])
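    # Walks the collection page by page following the 'next' links in
    # '<resources>_links', checking that each page honours 'limit' and that
    # the expected page count and overall ordering are preserved.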
def _test_list_with_pagination(self, resource, items, sort,
limit, expected_page_num,
resources=None,
query_params='',
verify_key='id'):
if not resources:
resources = '%ss' % resource
query_str = query_params + '&' if query_params else ''
query_str = query_str + ("limit=%s&sort_key=%s&"
"sort_dir=%s") % (limit, sort[0], sort[1])
req = self.new_list_request(resources, params=query_str)
items_res = []
page_num = 0
api = self._api_for_resource(resources)
resource = resource.replace('-', '_')
resources = resources.replace('-', '_')
while req:
page_num = page_num + 1
res = self.deserialize(self.fmt, req.get_response(api))
self.assertThat(len(res[resources]),
matchers.LessThan(limit + 1))
items_res = items_res + res[resources]
req = None
if '%s_links' % resources in res:
for link in res['%s_links' % resources]:
if link['rel'] == 'next':
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(expected_page_num, page_num)
self.assertEqual([item[resource][verify_key] for item in items],
[n[verify_key] for n in items_res])
def _test_list_with_pagination_reverse(self, resource, items, sort,
limit, expected_page_num,
resources=None,
query_params=''):
if not resources:
resources = '%ss' % resource
resource = resource.replace('-', '_')
api = self._api_for_resource(resources)
marker = items[-1][resource]['id']
query_str = query_params + '&' if query_params else ''
query_str = query_str + ("limit=%s&page_reverse=True&"
"sort_key=%s&sort_dir=%s&"
"marker=%s") % (limit, sort[0], sort[1],
marker)
req = self.new_list_request(resources, params=query_str)
item_res = [items[-1][resource]]
page_num = 0
resources = resources.replace('-', '_')
while req:
page_num = page_num + 1
res = self.deserialize(self.fmt, req.get_response(api))
self.assertThat(len(res[resources]),
matchers.LessThan(limit + 1))
res[resources].reverse()
item_res = item_res + res[resources]
req = None
if '%s_links' % resources in res:
for link in res['%s_links' % resources]:
if link['rel'] == 'previous':
content_type = 'application/%s' % self.fmt
req = testlib_api.create_request(link['href'],
'', content_type)
self.assertEqual(len(res[resources]),
limit)
self.assertEqual(expected_page_num, page_num)
expected_res = [item[resource]['id'] for item in items]
expected_res.reverse()
self.assertEqual(expected_res, [n['id'] for n in item_res])
def _compare_resource(self, observed_res, expected_res, res_name):
'''
        Compare the observed and expected resources (i.e. compare subnets)
'''
for k in expected_res:
self.assertIn(k, observed_res[res_name])
if isinstance(expected_res[k], list):
self.assertEqual(sorted(observed_res[res_name][k]),
sorted(expected_res[k]))
else:
self.assertEqual(observed_res[res_name][k], expected_res[k])
def _validate_resource(self, resource, keys, res_name):
for k in keys:
self.assertIn(k, resource[res_name])
if isinstance(keys[k], list):
self.assertEqual(sorted(resource[res_name][k]),
sorted(keys[k]))
else:
self.assertEqual(resource[res_name][k], keys[k])
class TestBasicGet(NeutronDbPluginV2TestCase):
def test_single_get_admin(self):
plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2()
with self.network() as network:
net_id = network['network']['id']
ctx = context.get_admin_context()
n = plugin._get_network(ctx, net_id)
self.assertEqual(net_id, n.id)
def test_single_get_tenant(self):
plugin = neutron.db.db_base_plugin_v2.NeutronDbPluginV2()
with self.network() as network:
net_id = network['network']['id']
ctx = context.get_admin_context()
n = plugin._get_network(ctx, net_id)
self.assertEqual(net_id, n.id)
class TestV2HTTPResponse(NeutronDbPluginV2TestCase):
def test_create_returns_201(self):
res = self._create_network(self.fmt, 'net2', True)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_list_returns_200(self):
req = self.new_list_request('networks')
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
def _check_list_with_fields(self, res, field_name):
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
body = self.deserialize(self.fmt, res)
        # further checks: 1 network
self.assertEqual(len(body['networks']), 1)
# 1 field in the network record
self.assertEqual(len(body['networks'][0]), 1)
# field is 'name'
self.assertIn(field_name, body['networks'][0])
def test_list_with_fields(self):
self._create_network(self.fmt, 'some_net', True)
req = self.new_list_request('networks', params="fields=name")
res = req.get_response(self.api)
self._check_list_with_fields(res, 'name')
def test_list_with_fields_noadmin(self):
tenant_id = 'some_tenant'
self._create_network(self.fmt,
'some_net',
True,
tenant_id=tenant_id,
set_context=True)
req = self.new_list_request('networks', params="fields=name")
req.environ['neutron.context'] = context.Context('', tenant_id)
res = req.get_response(self.api)
self._check_list_with_fields(res, 'name')
def test_list_with_fields_noadmin_and_policy_field(self):
"""If a field used by policy is selected, do not duplicate it.
Verifies that if the field parameter explicitly specifies a field
which is used by the policy engine, then it is not duplicated
in the response.
"""
tenant_id = 'some_tenant'
self._create_network(self.fmt,
'some_net',
True,
tenant_id=tenant_id,
set_context=True)
req = self.new_list_request('networks', params="fields=tenant_id")
req.environ['neutron.context'] = context.Context('', tenant_id)
res = req.get_response(self.api)
self._check_list_with_fields(res, 'tenant_id')
def test_show_returns_200(self):
with self.network() as net:
req = self.new_show_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
def test_delete_returns_204(self):
res = self._create_network(self.fmt, 'net1', True)
net = self.deserialize(self.fmt, res)
req = self.new_delete_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_update_returns_200(self):
with self.network() as net:
req = self.new_update_request('networks',
{'network': {'name': 'steve'}},
net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
def test_update_invalid_json_400(self):
with self.network() as net:
req = self.new_update_request('networks',
'{{"name": "aaa"}}',
net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_bad_route_404(self):
req = self.new_list_request('doohickeys')
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
class TestPortsV2(NeutronDbPluginV2TestCase):
def test_create_port_json(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
with self.port(name='myname') as port:
for k, v in keys:
self.assertEqual(port['port'][k], v)
self.assertIn('mac_address', port['port'])
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual('myname', port['port']['name'])
def test_create_port_as_admin(self):
with self.network() as network:
self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='bad_tenant_id',
device_id='fake_device',
device_owner='fake_owner',
fixed_ips=[],
set_context=False)
def test_create_port_bad_tenant(self):
with self.network() as network:
self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPNotFound.code,
tenant_id='bad_tenant_id',
device_id='fake_device',
device_owner='fake_owner',
fixed_ips=[],
set_context=True)
def test_create_port_public_network(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
with self.network(shared=True) as network:
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='another_tenant',
set_context=True)
port = self.deserialize(self.fmt, port_res)
for k, v in keys:
self.assertEqual(port['port'][k], v)
self.assertIn('mac_address', port['port'])
self._delete('ports', port['port']['id'])
def test_create_port_public_network_with_ip(self):
with self.network(shared=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
keys = [('admin_state_up', True),
('status', self.port_create_status),
('fixed_ips', [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}])]
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='another_tenant',
set_context=True)
port = self.deserialize(self.fmt, port_res)
for k, v in keys:
self.assertEqual(port['port'][k], v)
self.assertIn('mac_address', port['port'])
self._delete('ports', port['port']['id'])
def test_create_port_anticipating_allocation(self):
with self.network(shared=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
fixed_ips = [{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
self._create_port(self.fmt, network['network']['id'],
webob.exc.HTTPCreated.code,
fixed_ips=fixed_ips)
def test_create_port_public_network_with_invalid_ip_no_subnet_id(self,
expected_error='InvalidIpForNetwork'):
with self.network(shared=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24'):
ips = [{'ip_address': '1.1.1.1'}]
res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPBadRequest.code,
fixed_ips=ips,
set_context=True)
data = self.deserialize(self.fmt, res)
msg = str(n_exc.InvalidIpForNetwork(ip_address='1.1.1.1'))
self.assertEqual(expected_error, data['NeutronError']['type'])
self.assertEqual(msg, data['NeutronError']['message'])
def test_create_port_public_network_with_invalid_ip_and_subnet_id(self,
expected_error='InvalidIpForSubnet'):
with self.network(shared=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '1.1.1.1'}]
res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPBadRequest.code,
fixed_ips=ips,
set_context=True)
data = self.deserialize(self.fmt, res)
msg = str(n_exc.InvalidIpForSubnet(ip_address='1.1.1.1'))
self.assertEqual(expected_error, data['NeutronError']['type'])
self.assertEqual(msg, data['NeutronError']['message'])
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True)
self._validate_behavior_on_bulk_success(res, 'ports')
for p in self.deserialize(self.fmt, res)['ports']:
self._delete('ports', p['id'])
def test_create_ports_bulk_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True)
self._validate_behavior_on_bulk_success(res, 'ports')
for p in self.deserialize(self.fmt, res)['ports']:
self._delete('ports', p['id'])
def test_create_ports_bulk_wrong_input(self):
with self.network() as net:
overrides = {1: {'admin_state_up': 'doh'}}
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True,
override=overrides)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
req = self.new_list_request('ports')
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
ports = self.deserialize(self.fmt, res)
self.assertEqual(len(ports['ports']), 0)
def test_create_ports_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
orig = manager.NeutronManager.get_plugin().create_port
method_to_patch = _get_create_db_method('port')
with mock.patch.object(manager.NeutronManager.get_plugin(),
method_to_patch) as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2,
net['network']['id'],
'test',
True)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'ports', webob.exc.HTTPServerError.code
)
def test_create_ports_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
ctx = context.get_admin_context()
with self.network() as net:
plugin = manager.NeutronManager.get_plugin()
orig = plugin.create_port
method_to_patch = _get_create_db_method('port')
with mock.patch.object(plugin, method_to_patch) as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True, context=ctx)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'ports', webob.exc.HTTPServerError.code)
def test_list_ports(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port() as v1, self.port() as v2, self.port() as v3:
ports = (v1, v2, v3)
self._test_list_resources('port', ports)
def test_list_ports_filtered_by_fixed_ip(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port() as port1, self.port():
fixed_ips = port1['port']['fixed_ips'][0]
query_params = """
fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
""".strip() % (fixed_ips['ip_address'],
'192.168.126.5',
fixed_ips['subnet_id'])
self._test_list_resources('port', [port1],
query_params=query_params)
def test_list_ports_public_network(self):
with self.network(shared=True) as network:
with self.subnet(network) as subnet:
with self.port(subnet, tenant_id='tenant_1') as port1,\
self.port(subnet, tenant_id='tenant_2') as port2:
# Admin request - must return both ports
self._test_list_resources('port', [port1, port2])
# Tenant_1 request - must return single port
q_context = context.Context('', 'tenant_1')
self._test_list_resources('port', [port1],
neutron_context=q_context)
# Tenant_2 request - must return single port
q_context = context.Context('', 'tenant_2')
self._test_list_resources('port', [port2],
neutron_context=q_context)
def test_list_ports_with_sort_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port(admin_state_up='True',
mac_address='00:00:00:00:00:01') as port1,\
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:02') as port2,\
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:03') as port3:
self._test_list_with_sort('port', (port3, port2, port1),
[('admin_state_up', 'asc'),
('mac_address', 'desc')])
def test_list_ports_with_sort_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port(admin_state_up='True',
mac_address='00:00:00:00:00:01') as port1,\
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:02') as port2,\
self.port(admin_state_up='False',
mac_address='00:00:00:00:00:03') as port3:
self._test_list_with_sort('port', (port3, port2, port1),
[('admin_state_up', 'asc'),
('mac_address', 'desc')])
def test_list_ports_with_pagination_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port(mac_address='00:00:00:00:00:01') as port1,\
self.port(mac_address='00:00:00:00:00:02') as port2,\
self.port(mac_address='00:00:00:00:00:03') as port3:
self._test_list_with_pagination('port',
(port1, port2, port3),
('mac_address', 'asc'), 2, 2)
def test_list_ports_with_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port(mac_address='00:00:00:00:00:01') as port1,\
self.port(mac_address='00:00:00:00:00:02') as port2,\
self.port(mac_address='00:00:00:00:00:03') as port3:
self._test_list_with_pagination('port',
(port1, port2, port3),
('mac_address', 'asc'), 2, 2)
def test_list_ports_with_pagination_reverse_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port(mac_address='00:00:00:00:00:01') as port1,\
self.port(mac_address='00:00:00:00:00:02') as port2,\
self.port(mac_address='00:00:00:00:00:03') as port3:
self._test_list_with_pagination_reverse('port',
(port1, port2, port3),
('mac_address', 'asc'),
2, 2)
def test_list_ports_with_pagination_reverse_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port(mac_address='00:00:00:00:00:01') as port1,\
self.port(mac_address='00:00:00:00:00:02') as port2,\
self.port(mac_address='00:00:00:00:00:03') as port3:
self._test_list_with_pagination_reverse('port',
(port1, port2, port3),
('mac_address', 'asc'),
2, 2)
def test_show_port(self):
with self.port() as port:
req = self.new_show_request('ports', port['port']['id'], self.fmt)
sport = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port']['id'], sport['port']['id'])
def test_delete_port(self):
with self.port() as port:
self._delete('ports', port['port']['id'])
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
def test_delete_port_public_network(self):
with self.network(shared=True) as network:
port_res = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='another_tenant',
set_context=True)
port = self.deserialize(self.fmt, port_res)
self._delete('ports', port['port']['id'])
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
def test_update_port(self):
with self.port() as port:
data = {'port': {'admin_state_up': False}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
def update_port_mac(self, port, updated_fixed_ips=None):
orig_mac = port['mac_address']
mac = orig_mac.split(':')
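        # flip the last octet between '00' and '01' so the updated MAC is
        # guaranteed to differ from the original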
mac[5] = '01' if mac[5] != '01' else '00'
new_mac = ':'.join(mac)
data = {'port': {'mac_address': new_mac}}
if updated_fixed_ips:
data['port']['fixed_ips'] = updated_fixed_ips
req = self.new_update_request('ports', data, port['id'])
return req.get_response(self.api), new_mac
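    # For auto-address (SLAAC) subnets the port's fixed IP must equal the
    # EUI-64 address derived from the subnet CIDR and the port's MAC.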
def _check_v6_auto_address_address(self, port, subnet):
if ipv6_utils.is_auto_address_subnet(subnet['subnet']):
port_mac = port['port']['mac_address']
subnet_cidr = subnet['subnet']['cidr']
eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
port_mac))
self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
eui_addr)
def check_update_port_mac(
self, expected_status=webob.exc.HTTPOk.code,
expected_error='StateInvalid', subnet=None,
device_owner=DEVICE_OWNER_COMPUTE, updated_fixed_ips=None,
host_arg={}, arg_list=[]):
with self.port(device_owner=device_owner, subnet=subnet,
arg_list=arg_list, **host_arg) as port:
self.assertIn('mac_address', port['port'])
res, new_mac = self.update_port_mac(
port['port'], updated_fixed_ips=updated_fixed_ips)
self.assertEqual(expected_status, res.status_int)
if expected_status == webob.exc.HTTPOk.code:
result = self.deserialize(self.fmt, res)
self.assertIn('port', result)
self.assertEqual(new_mac, result['port']['mac_address'])
if subnet and subnet['subnet']['ip_version'] == 6:
self._check_v6_auto_address_address(port, subnet)
else:
error = self.deserialize(self.fmt, res)
self.assertEqual(expected_error,
error['NeutronError']['type'])
def test_update_port_mac(self):
self.check_update_port_mac()
# sub-classes for plugins/drivers that support mac address update
# override this method
def test_update_port_mac_ip(self):
with self.subnet() as subnet:
updated_fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.3'}]
self.check_update_port_mac(subnet=subnet,
updated_fixed_ips=updated_fixed_ips)
def test_update_port_mac_v6_slaac(self):
with self.subnet(gateway_ip='fe80::1',
cidr='2607:f0d0:1002:51::/64',
ip_version=6,
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
self.assertTrue(
ipv6_utils.is_auto_address_subnet(subnet['subnet']))
self.check_update_port_mac(subnet=subnet)
def test_update_port_mac_bad_owner(self):
self.check_update_port_mac(
device_owner=DEVICE_OWNER_NOT_COMPUTE,
expected_status=webob.exc.HTTPConflict.code,
expected_error='UnsupportedPortDeviceOwner')
def check_update_port_mac_used(self, expected_error='MacAddressInUse'):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
with self.port(subnet=subnet) as port2:
self.assertIn('mac_address', port['port'])
new_mac = port2['port']['mac_address']
data = {'port': {'mac_address': new_mac}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPConflict.code,
res.status_int)
error = self.deserialize(self.fmt, res)
self.assertEqual(expected_error,
error['NeutronError']['type'])
def test_update_port_mac_used(self):
self.check_update_port_mac_used()
def test_update_port_not_admin(self):
res = self._create_network(self.fmt, 'net1', True,
tenant_id='not_admin',
set_context=True)
net1 = self.deserialize(self.fmt, res)
res = self._create_port(self.fmt, net1['network']['id'],
tenant_id='not_admin', set_context=True)
port = self.deserialize(self.fmt, res)
data = {'port': {'admin_state_up': False}}
neutron_context = context.Context('', 'not_admin')
port = self._update('ports', port['port']['id'], data,
neutron_context=neutron_context)
self.assertEqual(port['port']['admin_state_up'], False)
def test_update_device_id_unchanged(self):
with self.port() as port:
data = {'port': {'admin_state_up': True,
'device_id': port['port']['device_id']}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'], True)
def test_update_device_id_null(self):
with self.port() as port:
data = {'port': {'device_id': None}}
req = self.new_update_request('ports', data, port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_delete_network_if_port_exists(self):
with self.port() as port:
req = self.new_delete_request('networks',
port['port']['network_id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_delete_network_port_exists_owned_by_network(self):
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
network_id = network['network']['id']
self._create_port(self.fmt, network_id,
device_owner=constants.DEVICE_OWNER_DHCP)
req = self.new_delete_request('networks', network_id)
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_update_port_delete_ip(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
data = {'port': {'admin_state_up': False,
'fixed_ips': []}}
req = self.new_update_request('ports',
data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
self.assertEqual(res['port']['fixed_ips'],
data['port']['fixed_ips'])
def test_no_more_port_exception(self):
with self.subnet(cidr='10.0.0.0/31', enable_dhcp=False) as subnet:
id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, id)
data = self.deserialize(self.fmt, res)
msg = str(n_exc.IpAddressGenerationFailure(net_id=id))
self.assertEqual(data['NeutronError']['message'], msg)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_update_port_update_ip(self):
"""Test update of port IP.
Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10.
"""
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
data = {'port': {'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': "10.0.0.10"}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.10')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
def test_update_port_update_ip_address_only(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
data = {'port': {'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': "10.0.0.10"},
{'ip_address': "10.0.0.2"}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertIn({'ip_address': '10.0.0.2',
'subnet_id': subnet['subnet']['id']}, ips)
self.assertIn({'ip_address': '10.0.0.10',
'subnet_id': subnet['subnet']['id']}, ips)
def test_update_port_update_ips(self):
"""Update IP and associate new IP on port.
        Check a port update with the specified subnet_ids. An IP address
will be allocated for each subnet_id.
"""
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
data = {'port': {'admin_state_up': False,
'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': '10.0.0.3'}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
def test_update_port_add_additional_ip(self):
"""Test update of port with additional IP."""
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
data = {'port': {'admin_state_up': False,
'fixed_ips': [{'subnet_id':
subnet['subnet']['id']},
{'subnet_id':
subnet['subnet']['id']}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertIn({'ip_address': '10.0.0.3',
'subnet_id': subnet['subnet']['id']}, ips)
self.assertIn({'ip_address': '10.0.0.4',
'subnet_id': subnet['subnet']['id']}, ips)
def test_update_port_invalid_fixed_ip_address_v6_slaac(self):
with self.subnet(
cidr='2607:f0d0:1002:51::/64',
ip_version=6,
ipv6_address_mode=constants.IPV6_SLAAC,
gateway_ip=attributes.ATTR_NOT_SPECIFIED) as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
port_mac = port['port']['mac_address']
subnet_cidr = subnet['subnet']['cidr']
eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
port_mac))
self.assertEqual(ips[0]['ip_address'], eui_addr)
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
data = {'port': {'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address':
'2607:f0d0:1002:51::5'}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = req.get_response(self.api)
err = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
self.assertEqual(err['NeutronError']['type'], 'InvalidInput')
def test_requested_duplicate_mac(self):
with self.port() as port:
mac = port['port']['mac_address']
# check that MAC address matches base MAC
base_mac = cfg.CONF.base_mac[0:2]
self.assertTrue(mac.startswith(base_mac))
kwargs = {"mac_address": mac}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_mac_generation(self):
cfg.CONF.set_override('base_mac', "12:34:56:00:00:00")
with self.port() as port:
mac = port['port']['mac_address']
self.assertTrue(mac.startswith("12:34:56"))
def test_mac_generation_4octet(self):
cfg.CONF.set_override('base_mac', "12:34:56:78:00:00")
with self.port() as port:
mac = port['port']['mac_address']
self.assertTrue(mac.startswith("12:34:56:78"))
def test_bad_mac_format(self):
cfg.CONF.set_override('base_mac', "bad_mac")
try:
self.plugin._check_base_mac_format()
except Exception:
return
self.fail("No exception for illegal base_mac format")
def test_mac_exhaustion(self):
        # rather than actually consuming all MACs (would take a LONG time)
# we try to allocate an already allocated mac address
cfg.CONF.set_override('mac_generation_retries', 3)
res = self._create_network(fmt=self.fmt, name='net1',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
net_id = network['network']['id']
error = n_exc.MacAddressInUse(net_id=net_id, mac='00:11:22:33:44:55')
with mock.patch.object(
neutron.db.db_base_plugin_v2.NeutronDbPluginV2,
'_create_port_with_mac', side_effect=error) as create_mock:
res = self._create_port(self.fmt, net_id=net_id)
self.assertEqual(res.status_int,
webob.exc.HTTPServiceUnavailable.code)
self.assertEqual(3, create_mock.call_count)
def test_requested_duplicate_ip(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
# Check configuring of duplicate IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': ips[0]['ip_address']}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_requested_subnet_id(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Request an IP from a specific subnet
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id']}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
ips = port2['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.3')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self._delete('ports', port2['port']['id'])
def test_requested_subnet_id_not_on_network(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
# Create new network
res = self._create_network(fmt=self.fmt, name='net2',
admin_state_up=True)
network2 = self.deserialize(self.fmt, res)
subnet2 = self._make_subnet(self.fmt, network2, "1.1.1.1",
"1.1.1.0/24", ip_version=4)
net_id = port['port']['network_id']
                # Request an IP from a specific subnet
kwargs = {"fixed_ips": [{'subnet_id':
subnet2['subnet']['id']}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_overlapping_subnets(self):
with self.subnet() as subnet:
tenant_id = subnet['subnet']['tenant_id']
net_id = subnet['subnet']['network_id']
res = self._create_subnet(self.fmt,
tenant_id=tenant_id,
net_id=net_id,
cidr='10.0.0.225/28',
ip_version=4,
gateway_ip=attributes.ATTR_NOT_SPECIFIED)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_requested_subnet_id_v4_and_v6(self):
with self.subnet() as subnet:
            # Get an IPv4 and an IPv6 address
tenant_id = subnet['subnet']['tenant_id']
net_id = subnet['subnet']['network_id']
res = self._create_subnet(
self.fmt,
tenant_id=tenant_id,
net_id=net_id,
cidr='2607:f0d0:1002:51::/124',
ip_version=6,
gateway_ip=attributes.ATTR_NOT_SPECIFIED)
subnet2 = self.deserialize(self.fmt, res)
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet2['subnet']['id']}]}
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port3 = self.deserialize(self.fmt, res)
ips = port3['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertIn({'ip_address': '10.0.0.2',
'subnet_id': subnet['subnet']['id']}, ips)
self.assertIn({'ip_address': '2607:f0d0:1002:51::2',
'subnet_id': subnet2['subnet']['id']}, ips)
res = self._create_port(self.fmt, net_id=net_id)
port4 = self.deserialize(self.fmt, res)
# Check that a v4 and a v6 address are allocated
ips = port4['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertIn({'ip_address': '10.0.0.3',
'subnet_id': subnet['subnet']['id']}, ips)
self.assertIn({'ip_address': '2607:f0d0:1002:51::3',
'subnet_id': subnet2['subnet']['id']}, ips)
self._delete('ports', port3['port']['id'])
self._delete('ports', port4['port']['id'])
def test_requested_invalid_fixed_ip_address_v6_slaac(self):
with self.subnet(gateway_ip='fe80::1',
cidr='2607:f0d0:1002:51::/64',
ip_version=6,
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '2607:f0d0:1002:51::5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
@mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
'_allocate_specific_ip')
def test_requested_fixed_ip_address_v6_slaac_router_iface(
self, alloc_specific_ip):
with self.subnet(gateway_ip='fe80::1',
cidr='fe80::/64',
ip_version=6,
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': 'fe80::1'}]}
net_id = subnet['subnet']['network_id']
device_owner = constants.DEVICE_OWNER_ROUTER_INTF
res = self._create_port(self.fmt, net_id=net_id,
device_owner=device_owner, **kwargs)
port = self.deserialize(self.fmt, res)
self.assertEqual(len(port['port']['fixed_ips']), 1)
self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
'fe80::1')
self.assertFalse(alloc_specific_ip.called)
def test_requested_subnet_id_v6_slaac(self):
with self.subnet(gateway_ip='fe80::1',
cidr='2607:f0d0:1002:51::/64',
ip_version=6,
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
with self.port(subnet,
fixed_ips=[{'subnet_id':
subnet['subnet']['id']}]) as port:
port_mac = port['port']['mac_address']
subnet_cidr = subnet['subnet']['cidr']
eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
port_mac))
self.assertEqual(port['port']['fixed_ips'][0]['ip_address'],
eui_addr)
def test_requested_subnet_id_v4_and_v6_slaac(self):
with self.network() as network:
with self.subnet(network) as subnet,\
self.subnet(
network,
cidr='2607:f0d0:1002:51::/64',
ip_version=6,
gateway_ip='fe80::1',
ipv6_address_mode=constants.IPV6_SLAAC) as subnet2:
with self.port(
subnet,
fixed_ips=[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet2['subnet']['id']}]
) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertIn({'ip_address': '10.0.0.2',
'subnet_id': subnet['subnet']['id']}, ips)
port_mac = port['port']['mac_address']
subnet_cidr = subnet2['subnet']['cidr']
eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(
subnet_cidr, port_mac))
self.assertIn({'ip_address': eui_addr,
'subnet_id': subnet2['subnet']['id']}, ips)
def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self):
with self.network() as network:
# Create an IPv4 and an IPv6 SLAAC subnet on the network
with self.subnet(network),\
self.subnet(network,
cidr='2607:f0d0:1002:51::/64',
ip_version=6,
gateway_ip='fe80::1',
ipv6_address_mode=constants.IPV6_SLAAC):
# Create a router port without specifying fixed_ips
port = self._make_port(
self.fmt, network['network']['id'],
device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
# Router port should only have an IPv4 address
fixed_ips = port['port']['fixed_ips']
self.assertEqual(1, len(fixed_ips))
self.assertEqual('10.0.0.2', fixed_ips[0]['ip_address'])
def _make_v6_subnet(self, network, ra_addr_mode):
return (self._make_subnet(self.fmt, network, gateway='fe80::1',
cidr='fe80::/64', ip_version=6,
ipv6_ra_mode=ra_addr_mode,
ipv6_address_mode=ra_addr_mode))
@staticmethod
def _calc_ipv6_addr_by_EUI64(port, subnet):
port_mac = port['port']['mac_address']
subnet_cidr = subnet['subnet']['cidr']
return str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, port_mac))
def test_ip_allocation_for_ipv6_subnet_slaac_address_mode(self):
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_v6_subnet(network, constants.IPV6_SLAAC)
port = self._make_port(self.fmt, network['network']['id'])
self.assertEqual(1, len(port['port']['fixed_ips']))
self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet),
port['port']['fixed_ips'][0]['ip_address'])
def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode):
"""Test port create with an IPv6 subnet incl in fixed IPs."""
with self.network(name='net') as network:
subnet = self._make_v6_subnet(network, addr_mode)
subnet_id = subnet['subnet']['id']
fixed_ips = [{'subnet_id': subnet_id}]
with self.port(subnet=subnet, fixed_ips=fixed_ips) as port:
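                # SLAAC addresses are derived from the port MAC (EUI-64);
                # DHCPv6 stateful addresses come from the allocation pool,
                # so fe80::2 is the first address after the fe80::1 gateway.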
if addr_mode == constants.IPV6_SLAAC:
exp_ip_addr = self._calc_ipv6_addr_by_EUI64(port, subnet)
else:
exp_ip_addr = 'fe80::2'
port_fixed_ips = port['port']['fixed_ips']
self.assertEqual(1, len(port_fixed_ips))
self.assertEqual(exp_ip_addr,
port_fixed_ips[0]['ip_address'])
def test_create_port_with_ipv6_slaac_subnet_in_fixed_ips(self):
self._test_create_port_with_ipv6_subnet_in_fixed_ips(
addr_mode=constants.IPV6_SLAAC)
def test_create_port_with_ipv6_dhcp_stateful_subnet_in_fixed_ips(self):
self._test_create_port_with_ipv6_subnet_in_fixed_ips(
addr_mode=constants.DHCPV6_STATEFUL)
def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self):
"""Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets."""
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
sub_dicts = [
{'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
'ip_version': 4, 'ra_addr_mode': None},
{'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24',
'ip_version': 4, 'ra_addr_mode': None},
{'gateway': 'fe80::1', 'cidr': 'fe80::/64',
'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
{'gateway': 'fe81::1', 'cidr': 'fe81::/64',
'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
{'gateway': 'fe82::1', 'cidr': 'fe82::/64',
'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL},
{'gateway': 'fe83::1', 'cidr': 'fe83::/64',
'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}]
subnets = {}
for sub_dict in sub_dicts:
subnet = self._make_subnet(
self.fmt, network,
gateway=sub_dict['gateway'],
cidr=sub_dict['cidr'],
ip_version=sub_dict['ip_version'],
ipv6_ra_mode=sub_dict['ra_addr_mode'],
ipv6_address_mode=sub_dict['ra_addr_mode'])
subnets[subnet['subnet']['id']] = sub_dict
res = self._create_port(self.fmt, net_id=network['network']['id'])
port = self.deserialize(self.fmt, res)
# Since the create port request was made without a list of fixed IPs,
# the port should be associated with addresses for one of the
# IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6
# SLAAC subnets.
self.assertEqual(4, len(port['port']['fixed_ips']))
addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0,
constants.IPV6_SLAAC: 0}
for fixed_ip in port['port']['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
if subnet_id in subnets:
addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1
self.assertEqual(1, addr_mode_count[None])
self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL])
self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC])
def test_delete_port_with_ipv6_slaac_address(self):
"""Test that a port with an IPv6 SLAAC address can be deleted."""
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
# Create a port that has an associated IPv6 SLAAC address
self._make_v6_subnet(network, constants.IPV6_SLAAC)
res = self._create_port(self.fmt, net_id=network['network']['id'])
port = self.deserialize(self.fmt, res)
self.assertEqual(1, len(port['port']['fixed_ips']))
# Confirm that the port can be deleted
self._delete('ports', port['port']['id'])
self._show('ports', port['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
def test_update_port_with_ipv6_slaac_subnet_in_fixed_ips(self):
"""Test port update with an IPv6 SLAAC subnet in fixed IPs."""
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
# Create a port using an IPv4 subnet and an IPv6 SLAAC subnet
self._make_subnet(self.fmt, network, gateway='10.0.0.1',
cidr='10.0.0.0/24', ip_version=4)
subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC)
res = self._create_port(self.fmt, net_id=network['network']['id'])
port = self.deserialize(self.fmt, res)
self.assertEqual(2, len(port['port']['fixed_ips']))
# Update port including only the IPv6 SLAAC subnet
data = {'port': {'fixed_ips': [{'subnet_id':
subnet_v6['subnet']['id']}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
# Port should only have an address corresponding to IPv6 SLAAC subnet
ips = res['port']['fixed_ips']
self.assertEqual(1, len(ips))
self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet_v6),
ips[0]['ip_address'])
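    # The update below names only the IPv4 subnet; the SLAAC address is
    # expected to survive the update rather than be removed, so the port
    # should still report two fixed IPs.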
def test_update_port_excluding_ipv6_slaac_subnet_from_fixed_ips(self):
"""Test port update excluding IPv6 SLAAC subnet from fixed ips."""
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
# Create a port using an IPv4 subnet and an IPv6 SLAAC subnet
subnet_v4 = self._make_subnet(self.fmt, network, gateway='10.0.0.1',
cidr='10.0.0.0/24', ip_version=4)
subnet_v6 = self._make_v6_subnet(network, constants.IPV6_SLAAC)
res = self._create_port(self.fmt, net_id=network['network']['id'])
port = self.deserialize(self.fmt, res)
self.assertEqual(2, len(port['port']['fixed_ips']))
# Update port including only the IPv4 subnet
data = {'port': {'fixed_ips': [{'subnet_id':
subnet_v4['subnet']['id'],
'ip_address': "10.0.0.10"}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
# Port should still have an addr corresponding to IPv6 SLAAC subnet
ips = res['port']['fixed_ips']
self.assertEqual(2, len(ips))
eui_addr = self._calc_ipv6_addr_by_EUI64(port, subnet_v6)
expected_v6_ip = {'subnet_id': subnet_v6['subnet']['id'],
'ip_address': eui_addr}
self.assertIn(expected_v6_ip, ips)
def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self):
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
v6_subnet_1 = self._make_subnet(self.fmt, network,
gateway='2001:100::1',
cidr='2001:100::0/64',
ip_version=6,
ipv6_ra_mode=constants.IPV6_SLAAC)
v6_subnet_2 = self._make_subnet(self.fmt, network,
gateway='2001:200::1',
cidr='2001:200::0/64',
ip_version=6,
ipv6_ra_mode=constants.IPV6_SLAAC)
port = self._make_port(self.fmt, network['network']['id'])
port_mac = port['port']['mac_address']
cidr_1 = v6_subnet_1['subnet']['cidr']
cidr_2 = v6_subnet_2['subnet']['cidr']
eui_addr_1 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_1,
port_mac))
eui_addr_2 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_2,
port_mac))
self.assertEqual({eui_addr_1, eui_addr_2},
{fixed_ip['ip_address'] for fixed_ip in
port['port']['fixed_ips']})
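    # A /29 holds 8 addresses; excluding the network address, the broadcast
    # address and the 10.0.0.3 gateway leaves exactly 5, so the five fixed
    # IPs requested below should consume the whole allocation pool.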
def test_range_allocation(self):
with self.subnet(gateway_ip='10.0.0.3',
cidr='10.0.0.0/29') as subnet:
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port = self.deserialize(self.fmt, res)
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 5)
alloc = ['10.0.0.1', '10.0.0.2', '10.0.0.4', '10.0.0.5',
'10.0.0.6']
for ip in ips:
self.assertIn(ip['ip_address'], alloc)
self.assertEqual(ip['subnet_id'],
subnet['subnet']['id'])
alloc.remove(ip['ip_address'])
self.assertEqual(len(alloc), 0)
self._delete('ports', port['port']['id'])
with self.subnet(gateway_ip='11.0.0.6',
cidr='11.0.0.0/29') as subnet:
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port = self.deserialize(self.fmt, res)
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 5)
alloc = ['11.0.0.1', '11.0.0.2', '11.0.0.3', '11.0.0.4',
'11.0.0.5']
for ip in ips:
self.assertIn(ip['ip_address'], alloc)
self.assertEqual(ip['subnet_id'],
subnet['subnet']['id'])
alloc.remove(ip['ip_address'])
self.assertEqual(len(alloc), 0)
self._delete('ports', port['port']['id'])
def test_requested_invalid_fixed_ips(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
# Test invalid subnet_id
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id':
'00000000-ffff-ffff-ffff-000000000000'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
# Test invalid IP address on specified subnet_id
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id'],
'ip_address': '1.1.1.1'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
                # Test invalid addresses - IPs not on the subnet, the network
                # address, or the broadcast address
bad_ips = ['1.1.1.1', '10.0.0.0', '10.0.0.255']
net_id = port['port']['network_id']
for ip in bad_ips:
kwargs = {"fixed_ips": [{'ip_address': ip}]}
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
                # Explicitly requesting the gateway address is allowed
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.1'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
ips = port2['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.1')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self._delete('ports', port2['port']['id'])
def test_invalid_ip(self):
with self.subnet() as subnet:
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '1011.0.0.5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_requested_split(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ports_to_delete = []
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.5'}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port2 = self.deserialize(self.fmt, res)
ports_to_delete.append(port2)
ips = port2['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.5')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
                # Ports created without fixed_ips get the remaining free
                # addresses in order, skipping the 10.0.0.5 allocated above
allocated = ['10.0.0.3', '10.0.0.4', '10.0.0.6']
for a in allocated:
res = self._create_port(self.fmt, net_id=net_id)
port2 = self.deserialize(self.fmt, res)
ports_to_delete.append(port2)
ips = port2['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], a)
self.assertEqual(ips[0]['subnet_id'],
subnet['subnet']['id'])
for p in ports_to_delete:
self._delete('ports', p['port']['id'])
def test_duplicate_ips(self):
with self.subnet() as subnet:
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.5'},
{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_fixed_ip_invalid_subnet_id(self):
with self.subnet() as subnet:
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': 'i am invalid',
'ip_address': '10.0.0.5'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_fixed_ip_invalid_ip(self):
with self.subnet() as subnet:
# Allocate specific IP
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.55555'}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_requested_ips_only(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
ips_only = ['10.0.0.18', '10.0.0.20', '10.0.0.22', '10.0.0.21',
'10.0.0.3', '10.0.0.17', '10.0.0.19']
ports_to_delete = []
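                # Each request names only an ip_address; the subnet should
                # be resolved from the network, so every returned fixed IP
                # is expected to carry the original subnet's id.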
for i in ips_only:
kwargs = {"fixed_ips": [{'ip_address': i}]}
net_id = port['port']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
port = self.deserialize(self.fmt, res)
ports_to_delete.append(port)
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], i)
self.assertEqual(ips[0]['subnet_id'],
subnet['subnet']['id'])
for p in ports_to_delete:
self._delete('ports', p['port']['id'])
def test_invalid_admin_state(self):
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id': network['network']['tenant_id'],
'admin_state_up': 7,
'fixed_ips': []}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_mac_address(self):
with self.network() as network:
data = {'port': {'network_id': network['network']['id'],
'tenant_id': network['network']['tenant_id'],
'admin_state_up': 1,
'mac_address': 'mac',
'fixed_ips': []}}
port_req = self.new_create_request('ports', data)
res = port_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
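    # Six fixed IPs on one port should exceed the configured
    # max_fixed_ips_per_port limit and be rejected.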
def test_max_fixed_ips_exceeded(self):
with self.subnet(gateway_ip='10.0.0.3',
cidr='10.0.0.0/24') as subnet:
kwargs = {"fixed_ips":
[{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']}]}
net_id = subnet['subnet']['network_id']
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_max_fixed_ips_exceeded(self):
with self.subnet(gateway_ip='10.0.0.3',
cidr='10.0.0.0/24') as subnet:
with self.port(subnet) as port:
data = {'port': {'fixed_ips':
[{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'},
{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.4'},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']},
{'subnet_id': subnet['subnet']['id']}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_delete_ports_by_device_id(self):
plugin = manager.NeutronManager.get_plugin()
ctx = context.get_admin_context()
with self.subnet() as subnet:
with self.port(subnet=subnet, device_id='owner1') as p1,\
self.port(subnet=subnet, device_id='owner1') as p2,\
self.port(subnet=subnet, device_id='owner2') as p3:
network_id = subnet['subnet']['network_id']
plugin.delete_ports_by_device_id(ctx, 'owner1',
network_id)
self._show('ports', p1['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
self._show('ports', p2['port']['id'],
expected_code=webob.exc.HTTPNotFound.code)
self._show('ports', p3['port']['id'],
expected_code=webob.exc.HTTPOk.code)
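    # _fail_second_call lets the first delete_port succeed and raises on the
    # second, so exactly one of the two 'owner1' ports should be gone after
    # the aborted bulk delete; which one is not deterministic.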
def _test_delete_ports_by_device_id_second_call_failure(self, plugin):
ctx = context.get_admin_context()
with self.subnet() as subnet:
with self.port(subnet=subnet, device_id='owner1') as p1,\
self.port(subnet=subnet, device_id='owner1') as p2,\
self.port(subnet=subnet, device_id='owner2') as p3:
orig = plugin.delete_port
with mock.patch.object(plugin, 'delete_port') as del_port:
def side_effect(*args, **kwargs):
return self._fail_second_call(del_port, orig,
*args, **kwargs)
del_port.side_effect = side_effect
network_id = subnet['subnet']['network_id']
self.assertRaises(n_exc.NeutronException,
plugin.delete_ports_by_device_id,
ctx, 'owner1', network_id)
statuses = {
self._show_response('ports', p['port']['id']).status_int
for p in [p1, p2]}
expected = {webob.exc.HTTPNotFound.code, webob.exc.HTTPOk.code}
self.assertEqual(expected, statuses)
self._show('ports', p3['port']['id'],
expected_code=webob.exc.HTTPOk.code)
def test_delete_ports_by_device_id_second_call_failure(self):
plugin = manager.NeutronManager.get_plugin()
self._test_delete_ports_by_device_id_second_call_failure(plugin)
def _test_delete_ports_ignores_port_not_found(self, plugin):
ctx = context.get_admin_context()
with self.subnet() as subnet:
with self.port(subnet=subnet, device_id='owner1') as p,\
mock.patch.object(plugin, 'delete_port') as del_port:
del_port.side_effect = n_exc.PortNotFound(
port_id=p['port']['id']
)
network_id = subnet['subnet']['network_id']
try:
plugin.delete_ports_by_device_id(ctx, 'owner1',
network_id)
except n_exc.PortNotFound:
self.fail("delete_ports_by_device_id unexpectedly raised "
"a PortNotFound exception. It should ignore "
"this exception because it is often called at "
"the same time other concurrent operations are "
"deleting some of the same ports.")
def test_delete_ports_ignores_port_not_found(self):
plugin = manager.NeutronManager.get_plugin()
self._test_delete_ports_ignores_port_not_found(plugin)
class TestNetworksV2(NeutronDbPluginV2TestCase):
# NOTE(cerberus): successful network update and delete are
# effectively tested above
def test_create_network(self):
name = 'net1'
keys = [('subnets', []), ('name', name), ('admin_state_up', True),
('status', self.net_create_status), ('shared', False)]
with self.network(name=name) as net:
for k, v in keys:
self.assertEqual(net['network'][k], v)
def test_create_public_network(self):
name = 'public_net'
keys = [('subnets', []), ('name', name), ('admin_state_up', True),
('status', self.net_create_status), ('shared', True)]
with self.network(name=name, shared=True) as net:
for k, v in keys:
self.assertEqual(net['network'][k], v)
def test_create_public_network_no_admin_tenant(self):
name = 'public_net'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
with self.network(name=name,
shared=True,
tenant_id="another_tenant",
set_context=True):
pass
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPForbidden.code)
def test_update_network(self):
with self.network() as network:
data = {'network': {'name': 'a_brand_new_name'}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['network']['name'],
data['network']['name'])
def test_update_shared_network_noadmin_returns_403(self):
with self.network(shared=True) as network:
data = {'network': {'name': 'a_brand_new_name'}}
req = self.new_update_request('networks',
data,
network['network']['id'])
req.environ['neutron.context'] = context.Context('', 'somebody')
res = req.get_response(self.api)
# The API layer always returns 404 on updates in place of 403
self.assertEqual(res.status_int, webob.exc.HTTPNotFound.code)
def test_update_network_set_shared(self):
with self.network(shared=False) as network:
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertTrue(res['network']['shared'])
def test_update_network_set_shared_owner_returns_403(self):
with self.network(shared=False) as network:
net_owner = network['network']['tenant_id']
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
req.environ['neutron.context'] = context.Context('u', net_owner)
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPForbidden.code)
def test_update_network_with_subnet_set_shared(self):
with self.network(shared=False) as network:
with self.subnet(network=network) as subnet:
data = {'network': {'shared': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertTrue(res['network']['shared'])
# must query db to see whether subnet's shared attribute
# has been updated or not
ctx = context.Context('', '', is_admin=True)
subnet_db = manager.NeutronManager.get_plugin()._get_subnet(
ctx, subnet['subnet']['id'])
                self.assertTrue(subnet_db['shared'])
def test_update_network_set_not_shared_single_tenant(self):
with self.network(shared=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id=network['network']['tenant_id'],
set_context=True)
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertFalse(res['network']['shared'])
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
def test_update_network_set_not_shared_other_tenant_returns_409(self):
with self.network(shared=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='somebody_else',
set_context=True)
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
self.assertEqual(req.get_response(self.api).status_int,
webob.exc.HTTPConflict.code)
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
def test_update_network_set_not_shared_multi_tenants_returns_409(self):
with self.network(shared=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='somebody_else',
set_context=True)
res2 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id=network['network']['tenant_id'],
set_context=True)
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
self.assertEqual(req.get_response(self.api).status_int,
webob.exc.HTTPConflict.code)
port1 = self.deserialize(self.fmt, res1)
port2 = self.deserialize(self.fmt, res2)
self._delete('ports', port1['port']['id'])
self._delete('ports', port2['port']['id'])
def test_update_network_set_not_shared_multi_tenants2_returns_409(self):
with self.network(shared=True) as network:
res1 = self._create_port(self.fmt,
network['network']['id'],
webob.exc.HTTPCreated.code,
tenant_id='somebody_else',
set_context=True)
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPCreated.code,
tenant_id=network['network']['tenant_id'],
set_context=True)
data = {'network': {'shared': False}}
req = self.new_update_request('networks',
data,
network['network']['id'])
self.assertEqual(req.get_response(self.api).status_int,
webob.exc.HTTPConflict.code)
port1 = self.deserialize(self.fmt, res1)
self._delete('ports', port1['port']['id'])
def test_create_networks_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
res = self._create_network_bulk(self.fmt, 2, 'test', True)
self._validate_behavior_on_bulk_success(res, 'networks')
def test_create_networks_bulk_native_quotas(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
quota = 4
cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
res = self._create_network_bulk(self.fmt, quota + 1, 'test', True)
self._validate_behavior_on_bulk_failure(
res, 'networks',
errcode=webob.exc.HTTPConflict.code)
def test_create_networks_bulk_tenants_and_quotas(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
quota = 2
cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
networks = [{'network': {'name': 'n1',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n2',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n1',
'tenant_id': 't1'}},
{'network': {'name': 'n2',
'tenant_id': 't1'}}]
res = self._create_bulk_from_list(self.fmt, 'network', networks)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
def test_create_networks_bulk_tenants_and_quotas_fail(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
quota = 2
cfg.CONF.set_override('quota_network', quota, group='QUOTAS')
networks = [{'network': {'name': 'n1',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n2',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n1',
'tenant_id': 't1'}},
{'network': {'name': 'n3',
'tenant_id': self._tenant_id}},
{'network': {'name': 'n2',
'tenant_id': 't1'}}]
res = self._create_bulk_from_list(self.fmt, 'network', networks)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
def test_create_networks_bulk_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
res = self._create_network_bulk(self.fmt, 2, 'test', True)
self._validate_behavior_on_bulk_success(res, 'networks')
def test_create_networks_bulk_wrong_input(self):
res = self._create_network_bulk(self.fmt, 2, 'test', True,
override={1:
{'admin_state_up': 'doh'}})
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
req = self.new_list_request('networks')
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPOk.code)
nets = self.deserialize(self.fmt, res)
self.assertEqual(len(nets['networks']), 0)
def test_create_networks_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
orig = manager.NeutronManager.get_plugin().create_network
        # ensures the API chooses the emulation code path
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
method_to_patch = _get_create_db_method('network')
with mock.patch.object(manager.NeutronManager.get_plugin(),
method_to_patch) as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'networks', webob.exc.HTTPServerError.code
)
def test_create_networks_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
orig = manager.NeutronManager.get_plugin().create_network
method_to_patch = _get_create_db_method('network')
with mock.patch.object(manager.NeutronManager.get_plugin(),
method_to_patch) as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'networks', webob.exc.HTTPServerError.code
)
def test_list_networks(self):
with self.network() as v1, self.network() as v2, self.network() as v3:
networks = (v1, v2, v3)
self._test_list_resources('network', networks)
def test_list_networks_with_sort_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
        with self.network(admin_state_up=True, name='net1') as net1,\
                self.network(admin_state_up=False, name='net2') as net2,\
                self.network(admin_state_up=False, name='net3') as net3:
self._test_list_with_sort('network', (net3, net2, net1),
[('admin_state_up', 'asc'),
('name', 'desc')])
def test_list_networks_with_sort_extended_attr_native_returns_400(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
        with self.network(admin_state_up=True, name='net1'),\
                self.network(admin_state_up=False, name='net2'),\
                self.network(admin_state_up=False, name='net3'):
req = self.new_list_request(
'networks',
params='sort_key=provider:segmentation_id&sort_dir=asc')
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_list_networks_with_sort_remote_key_native_returns_400(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
        with self.network(admin_state_up=True, name='net1'),\
                self.network(admin_state_up=False, name='net2'),\
                self.network(admin_state_up=False, name='net3'):
req = self.new_list_request(
'networks', params='sort_key=subnets&sort_dir=asc')
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_list_networks_with_sort_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
        with self.network(admin_state_up=True, name='net1') as net1,\
                self.network(admin_state_up=False, name='net2') as net2,\
                self.network(admin_state_up=False, name='net3') as net3:
self._test_list_with_sort('network', (net3, net2, net1),
[('admin_state_up', 'asc'),
('name', 'desc')])
def test_list_networks_with_pagination_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
with self.network(name='net1') as net1,\
self.network(name='net2') as net2,\
self.network(name='net3') as net3:
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2)
def test_list_networks_with_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
with self.network(name='net1') as net1,\
self.network(name='net2') as net2,\
self.network(name='net3') as net3:
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2)
def test_list_networks_without_pk_in_fields_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
with self.network(name='net1', shared=True) as net1,\
self.network(name='net2', shared=False) as net2,\
self.network(name='net3', shared=True) as net3:
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2,
query_params="fields=name",
verify_key='name')
def test_list_networks_without_pk_in_fields_pagination_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
with self.network(name='net1') as net1,\
self.network(name='net2') as net2,\
self.network(name='net3') as net3:
self._test_list_with_pagination('network',
(net1, net2, net3),
('name', 'asc'), 2, 2,
query_params="fields=shared",
verify_key='shared')
def test_list_networks_with_pagination_reverse_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
with self.network(name='net1') as net1,\
self.network(name='net2') as net2,\
self.network(name='net3') as net3:
self._test_list_with_pagination_reverse('network',
(net1, net2, net3),
('name', 'asc'), 2, 2)
def test_list_networks_with_pagination_reverse_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
with self.network(name='net1') as net1,\
self.network(name='net2') as net2,\
self.network(name='net3') as net3:
self._test_list_with_pagination_reverse('network',
(net1, net2, net3),
('name', 'asc'), 2, 2)
def test_list_networks_with_parameters(self):
with self.network(name='net1', admin_state_up=False) as net1,\
self.network(name='net2') as net2:
query_params = 'admin_state_up=False'
self._test_list_resources('network', [net1],
query_params=query_params)
query_params = 'admin_state_up=True'
self._test_list_resources('network', [net2],
query_params=query_params)
def test_list_networks_with_fields(self):
with self.network(name='net1') as net1:
req = self.new_list_request('networks',
params='fields=name')
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(1, len(res['networks']))
self.assertEqual(res['networks'][0]['name'],
net1['network']['name'])
self.assertIsNone(res['networks'][0].get('id'))
def test_list_networks_with_parameters_invalid_values(self):
with self.network(name='net1', admin_state_up=False),\
self.network(name='net2'):
req = self.new_list_request('networks',
params='admin_state_up=fake')
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_list_shared_networks_with_non_admin_user(self):
with self.network(shared=False,
name='net1',
tenant_id='tenant1') as net1,\
self.network(shared=True,
name='net2',
tenant_id='another_tenant') as net2,\
self.network(shared=False,
name='net3',
tenant_id='another_tenant'):
ctx = context.Context(user_id='non_admin',
tenant_id='tenant1',
is_admin=False)
self._test_list_resources('network', (net1, net2), ctx)
def test_show_network(self):
with self.network(name='net1') as net:
req = self.new_show_request('networks', net['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['network']['name'],
net['network']['name'])
def test_show_network_with_subnet(self):
with self.network(name='net1') as net:
with self.subnet(net) as subnet:
req = self.new_show_request('networks', net['network']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['network']['subnets'][0],
subnet['subnet']['id'])
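    # admin_state_up is validated as a boolean: True/False, their string
    # forms and 1 are accepted below, while 7 and the string "7" are
    # rejected with a client error.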
def test_invalid_admin_status(self):
value = [[7, False, webob.exc.HTTPClientError.code],
[True, True, webob.exc.HTTPCreated.code],
["True", True, webob.exc.HTTPCreated.code],
["true", True, webob.exc.HTTPCreated.code],
[1, True, webob.exc.HTTPCreated.code],
["False", False, webob.exc.HTTPCreated.code],
[False, False, webob.exc.HTTPCreated.code],
["false", False, webob.exc.HTTPCreated.code],
["7", False, webob.exc.HTTPClientError.code]]
for v in value:
data = {'network': {'name': 'net',
'admin_state_up': v[0],
'tenant_id': self._tenant_id}}
network_req = self.new_create_request('networks', data)
req = network_req.get_response(self.api)
self.assertEqual(req.status_int, v[2])
if v[2] == webob.exc.HTTPCreated.code:
res = self.deserialize(self.fmt, req)
self.assertEqual(res['network']['admin_state_up'], v[1])
class TestSubnetsV2(NeutronDbPluginV2TestCase):
def _test_create_subnet(self, network=None, expected=None, **kwargs):
keys = kwargs.copy()
keys.setdefault('cidr', '10.0.0.0/24')
keys.setdefault('ip_version', 4)
keys.setdefault('enable_dhcp', True)
with self.subnet(network=network, **keys) as subnet:
# verify the response has each key with the correct value
self._validate_resource(subnet, keys, 'subnet')
# verify the configured validations are correct
if expected:
self._compare_resource(subnet, expected, 'subnet')
self._delete('subnets', subnet['subnet']['id'])
return subnet
def test_create_subnet(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
subnet = self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr)
self.assertEqual(4, subnet['subnet']['ip_version'])
self.assertIn('name', subnet['subnet'])
def test_create_subnet_with_network_different_tenant(self):
with self.network(shared=False, tenant_id='tenant1') as network:
ctx = context.Context(user_id='non_admin',
tenant_id='tenant2',
is_admin=False)
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'gateway_ip': '10.0.2.1'}}
req = self.new_create_request('subnets', data,
self.fmt, context=ctx)
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
def test_create_two_subnets(self):
gateway_ips = ['10.0.0.1', '10.0.1.1']
cidrs = ['10.0.0.0/24', '10.0.1.0/24']
with self.network() as network:
with self.subnet(network=network,
gateway_ip=gateway_ips[0],
cidr=cidrs[0]):
with self.subnet(network=network,
gateway_ip=gateway_ips[1],
cidr=cidrs[1]):
net_req = self.new_show_request('networks',
network['network']['id'])
raw_res = net_req.get_response(self.api)
net_res = self.deserialize(self.fmt, raw_res)
for subnet_id in net_res['network']['subnets']:
sub_req = self.new_show_request('subnets', subnet_id)
raw_res = sub_req.get_response(self.api)
sub_res = self.deserialize(self.fmt, raw_res)
self.assertIn(sub_res['subnet']['cidr'], cidrs)
self.assertIn(sub_res['subnet']['gateway_ip'],
gateway_ips)
def test_create_two_subnets_same_cidr_returns_400(self):
gateway_ip_1 = '10.0.0.1'
cidr_1 = '10.0.0.0/24'
gateway_ip_2 = '10.0.0.10'
cidr_2 = '10.0.0.0/24'
with self.network() as network:
with self.subnet(network=network,
gateway_ip=gateway_ip_1,
cidr=cidr_1):
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
with self.subnet(network=network,
gateway_ip=gateway_ip_2,
cidr=cidr_2):
pass
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_bad_V4_cidr(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0',
'ip_version': '4',
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_no_ip_version(self):
with self.network() as network:
cfg.CONF.set_override('default_ipv4_subnet_pool', None)
cfg.CONF.set_override('default_ipv6_subnet_pool', None)
data = {'subnet': {'network_id': network['network']['id'],
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_only_ip_version_v6_no_pool(self):
with self.network() as network:
tenant_id = network['network']['tenant_id']
cfg.CONF.set_override('default_ipv6_subnet_pool', None)
data = {'subnet': {'network_id': network['network']['id'],
'ip_version': '6',
'tenant_id': tenant_id}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_only_ip_version_v4(self):
with self.network() as network:
tenant_id = network['network']['tenant_id']
subnetpool_prefix = '10.0.0.0/8'
with self.subnetpool(prefixes=[subnetpool_prefix],
admin=False,
name="My subnet pool",
tenant_id=tenant_id,
min_prefixlen='25') as subnetpool:
subnetpool_id = subnetpool['subnetpool']['id']
cfg.CONF.set_override('default_ipv4_subnet_pool',
subnetpool_id)
data = {'subnet': {'network_id': network['network']['id'],
'ip_version': '4',
'prefixlen': '27',
'tenant_id': tenant_id}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
subnet = self.deserialize(self.fmt, res)['subnet']
ip_net = netaddr.IPNetwork(subnet['cidr'])
self.assertTrue(ip_net in netaddr.IPNetwork(subnetpool_prefix))
self.assertEqual(27, ip_net.prefixlen)
self.assertEqual(subnetpool_id, subnet['subnetpool_id'])
def test_create_subnet_only_ip_version_v6(self):
with self.network() as network:
tenant_id = network['network']['tenant_id']
subnetpool_prefix = '2000::/56'
with self.subnetpool(prefixes=[subnetpool_prefix],
admin=False,
name="My ipv6 subnet pool",
tenant_id=tenant_id,
min_prefixlen='64') as subnetpool:
subnetpool_id = subnetpool['subnetpool']['id']
cfg.CONF.set_override('default_ipv6_subnet_pool',
subnetpool_id)
data = {'subnet': {'network_id': network['network']['id'],
'ip_version': '6',
'tenant_id': tenant_id}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
subnet = self.deserialize(self.fmt, res)['subnet']
self.assertEqual(subnetpool_id, subnet['subnetpool_id'])
ip_net = netaddr.IPNetwork(subnet['cidr'])
self.assertTrue(ip_net in netaddr.IPNetwork(subnetpool_prefix))
self.assertEqual(64, ip_net.prefixlen)
def test_create_subnet_bad_V4_cidr_prefix_len(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': constants.IPv4_ANY,
'ip_version': '4',
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '0.0.0.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_V6_cidr(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::',
'ip_version': '6',
'tenant_id': network['network']['tenant_id'],
'gateway_ip': 'fe80::1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_V6_slaac_big_prefix(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '2014::/65',
'ip_version': '6',
'tenant_id': network['network']['tenant_id'],
'gateway_ip': 'fe80::1',
'ipv6_address_mode': 'slaac'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(webob.exc.HTTPClientError.code, res.status_int)
def test_create_2_subnets_overlapping_cidr_allowed_returns_200(self):
cidr_1 = '10.0.0.0/23'
cidr_2 = '10.0.0.0/24'
cfg.CONF.set_override('allow_overlapping_ips', True)
with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2):
pass
def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self):
cidr_1 = '10.0.0.0/23'
cidr_2 = '10.0.0.0/24'
cfg.CONF.set_override('allow_overlapping_ips', False)
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2):
pass
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnets_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2, net['network']['id'],
'test')
self._validate_behavior_on_bulk_success(res, 'subnets')
def test_create_subnets_bulk_emulated(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
self._validate_behavior_on_bulk_success(res, 'subnets')
def test_create_subnets_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
        # ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('six.moves.builtins.hasattr',
new=fakehasattr):
orig = manager.NeutronManager.get_plugin().create_subnet
method_to_patch = _get_create_db_method('subnet')
with mock.patch.object(manager.NeutronManager.get_plugin(),
method_to_patch) as patched_plugin:
def side_effect(*args, **kwargs):
self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
self._delete('networks', net['network']['id'])
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'subnets', webob.exc.HTTPServerError.code
)
def test_create_subnets_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
plugin = manager.NeutronManager.get_plugin()
orig = plugin.create_subnet
method_to_patch = _get_create_db_method('subnet')
with mock.patch.object(plugin, method_to_patch) as patched_plugin:
def side_effect(*args, **kwargs):
return self._fail_second_call(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect a 500 as we injected a fault in the plugin
self._validate_behavior_on_bulk_failure(
res, 'subnets', webob.exc.HTTPServerError.code
)
def test_delete_subnet(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_port_exists_owned_by_network(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4)
self._create_port(self.fmt,
network['network']['id'],
device_owner=constants.DEVICE_OWNER_DHCP)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_dhcp_port_associated_with_other_subnets(self):
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet1 = self._make_subnet(self.fmt, network, '10.0.0.1',
'10.0.0.0/24', ip_version=4)
subnet2 = self._make_subnet(self.fmt, network, '10.0.1.1',
'10.0.1.0/24', ip_version=4)
res = self._create_port(self.fmt,
network['network']['id'],
device_owner=constants.DEVICE_OWNER_DHCP,
fixed_ips=[
{'subnet_id': subnet1['subnet']['id']},
{'subnet_id': subnet2['subnet']['id']}
])
port = self.deserialize(self.fmt, res)
expected_subnets = [subnet1['subnet']['id'], subnet2['subnet']['id']]
self.assertEqual(expected_subnets,
[s['subnet_id'] for s in port['port']['fixed_ips']])
req = self.new_delete_request('subnets', subnet1['subnet']['id'])
res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
port = self._show('ports', port['port']['id'])
expected_subnets = [subnet2['subnet']['id']]
self.assertEqual(expected_subnets,
[s['subnet_id'] for s in port['port']['fixed_ips']])
req = self.new_delete_request('subnets', subnet2['subnet']['id'])
res = req.get_response(self.api)
        self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
port = self._show('ports', port['port']['id'])
self.assertFalse(port['port']['fixed_ips'])
def test_delete_subnet_port_exists_owned_by_other(self):
with self.subnet() as subnet:
with self.port(subnet=subnet):
id = subnet['subnet']['id']
req = self.new_delete_request('subnets', id)
res = req.get_response(self.api)
data = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, webob.exc.HTTPConflict.code)
msg = str(n_exc.SubnetInUse(subnet_id=id))
self.assertEqual(data['NeutronError']['message'], msg)
def test_delete_subnet_with_other_subnet_on_network_still_in_use(self):
with self.network() as network:
with self.subnet(network=network) as subnet1,\
self.subnet(network=network,
cidr='10.0.1.0/24') as subnet2:
subnet1_id = subnet1['subnet']['id']
subnet2_id = subnet2['subnet']['id']
with self.port(
subnet=subnet1,
fixed_ips=[{'subnet_id': subnet1_id}]):
req = self.new_delete_request('subnets', subnet2_id)
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPNoContent.code)
def _create_slaac_subnet_and_port(self, port_owner=None):
# Create an IPv6 SLAAC subnet and a port using that subnet
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway='fe80::1',
cidr='fe80::/64', ip_version=6,
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC)
kwargs = {}
if port_owner:
kwargs['device_owner'] = port_owner
if port_owner in constants.ROUTER_INTERFACE_OWNERS:
kwargs['fixed_ips'] = [{'ip_address': 'fe80::1'}]
res = self._create_port(self.fmt, net_id=network['network']['id'],
**kwargs)
port = self.deserialize(self.fmt, res)
self.assertEqual(1, len(port['port']['fixed_ips']))
# The port should have an address from the subnet
req = self.new_show_request('ports', port['port']['id'], self.fmt)
res = req.get_response(self.api)
        sport = self.deserialize(self.fmt, res)
self.assertEqual(1, len(sport['port']['fixed_ips']))
return subnet, port
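    # Deleting a SLAAC subnet is allowed while ordinary ports still hold
    # addresses from it (the addresses are simply removed); the router-port
    # variant below expects a 409 Conflict instead.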
def test_delete_subnet_ipv6_slaac_port_exists(self):
"""Test IPv6 SLAAC subnet delete when a port is still using subnet."""
subnet, port = self._create_slaac_subnet_and_port()
# Delete the subnet
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
# The port should no longer have an address from the deleted subnet
req = self.new_show_request('ports', port['port']['id'], self.fmt)
res = req.get_response(self.api)
        sport = self.deserialize(self.fmt, res)
self.assertEqual(0, len(sport['port']['fixed_ips']))
def test_delete_subnet_ipv6_slaac_router_port_exists(self):
"""Test IPv6 SLAAC subnet delete with a router port using the subnet"""
subnet, port = self._create_slaac_subnet_and_port(
constants.DEVICE_OWNER_ROUTER_INTF)
# Delete the subnet and assert that we get a HTTP 409 error
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
# The subnet should still exist and the port should still have an
# address from the subnet
req = self.new_show_request('subnets', subnet['subnet']['id'],
self.fmt)
res = req.get_response(self.api)
        ssubnet = self.deserialize(self.fmt, res)
self.assertIsNotNone(ssubnet)
req = self.new_show_request('ports', port['port']['id'], self.fmt)
res = req.get_response(self.api)
        sport = self.deserialize(self.fmt, res)
self.assertEqual(1, len(sport['port']['fixed_ips']))
port_subnet_ids = [fip['subnet_id'] for fip in
sport['port']['fixed_ips']]
self.assertIn(subnet['subnet']['id'], port_subnet_ids)
def test_delete_network(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip, cidr,
ip_version=4)
req = self.new_delete_request('networks', network['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
req = self.new_show_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)
def test_create_subnet_bad_tenant(self):
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.2.0/24',
webob.exc.HTTPNotFound.code,
ip_version=4,
tenant_id='bad_tenant_id',
gateway_ip='10.0.2.1',
device_owner='fake_owner',
set_context=True)
def test_create_subnet_as_admin(self):
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.2.0/24',
webob.exc.HTTPCreated.code,
ip_version=4,
tenant_id='bad_tenant_id',
gateway_ip='10.0.2.1',
device_owner='fake_owner',
set_context=False)
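    # Requested CIDRs are normalised server-side: host bits are cleared, so
    # e.g. 10.129.122.5/8 is stored as 10.0.0.0/8; only the /32 keeps the
    # host address as-is.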
def test_create_subnet_nonzero_cidr(self):
with self.subnet(cidr='10.129.122.5/8') as v1,\
self.subnet(cidr='11.129.122.5/15') as v2,\
self.subnet(cidr='12.129.122.5/16') as v3,\
self.subnet(cidr='13.129.122.5/18') as v4,\
self.subnet(cidr='14.129.122.5/22') as v5,\
self.subnet(cidr='15.129.122.5/24') as v6,\
self.subnet(cidr='16.129.122.5/28') as v7,\
self.subnet(cidr='17.129.122.5/32', enable_dhcp=False) as v8:
subs = (v1, v2, v3, v4, v5, v6, v7, v8)
# the API should accept and correct these for users
self.assertEqual(subs[0]['subnet']['cidr'], '10.0.0.0/8')
self.assertEqual(subs[1]['subnet']['cidr'], '11.128.0.0/15')
self.assertEqual(subs[2]['subnet']['cidr'], '12.129.0.0/16')
self.assertEqual(subs[3]['subnet']['cidr'], '13.129.64.0/18')
self.assertEqual(subs[4]['subnet']['cidr'], '14.129.120.0/22')
self.assertEqual(subs[5]['subnet']['cidr'], '15.129.122.0/24')
self.assertEqual(subs[6]['subnet']['cidr'], '16.129.122.0/28')
self.assertEqual(subs[7]['subnet']['cidr'], '17.129.122.5/32')
def _test_create_subnet_with_invalid_netmask_returns_400(self, *args):
with self.network() as network:
for cidr in args:
ip_version = netaddr.IPNetwork(cidr).version
self._create_subnet(self.fmt,
network['network']['id'],
cidr,
webob.exc.HTTPClientError.code,
ip_version=ip_version)
def test_create_subnet_with_invalid_netmask_returns_400_ipv4(self):
self._test_create_subnet_with_invalid_netmask_returns_400(
'10.0.0.0/31', '10.0.0.0/32')
def test_create_subnet_with_invalid_netmask_returns_400_ipv6(self):
self._test_create_subnet_with_invalid_netmask_returns_400(
'cafe:cafe::/127', 'cafe:cafe::/128')
def test_create_subnet_bad_ip_version(self):
with self.network() as network:
# Check bad IP version
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 'abc',
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_ip_version_null(self):
with self.network() as network:
# Check bad IP version
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': None,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_uuid(self):
with self.network() as network:
# Check invalid UUID
data = {'subnet': {'network_id': None,
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_boolean(self):
with self.network() as network:
# Check invalid boolean
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'enable_dhcp': None,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_bad_pools(self):
with self.network() as network:
# Check allocation pools
allocation_pools = [[{'end': '10.0.0.254'}],
[{'start': '10.0.0.254'}],
[{'start': '1000.0.0.254'}],
[{'start': '10.0.0.2', 'end': '10.0.0.254'},
{'end': '10.0.0.254'}],
None,
[{'start': '10.0.0.2', 'end': '10.0.0.3'},
{'start': '10.0.0.2', 'end': '10.0.0.3'}]]
tenant_id = network['network']['tenant_id']
for pool in allocation_pools:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'tenant_id': tenant_id,
'gateway_ip': '10.0.2.1',
'allocation_pools': pool}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_create_subnet_bad_nameserver(self):
with self.network() as network:
# Check nameservers
nameserver_pools = [['1100.0.0.2'],
['1.1.1.2', '1.1000.1.3'],
['1.1.1.2', '1.1.1.2']]
tenant_id = network['network']['tenant_id']
for nameservers in nameserver_pools:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'tenant_id': tenant_id,
'gateway_ip': '10.0.2.1',
'dns_nameservers': nameservers}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_create_subnet_bad_hostroutes(self):
with self.network() as network:
# Check hostroutes
hostroute_pools = [[{'destination': '100.0.0.0/24'}],
[{'nexthop': '10.0.2.20'}],
[{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'},
{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'}]]
tenant_id = network['network']['tenant_id']
for hostroutes in hostroute_pools:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': '4',
'tenant_id': tenant_id,
'gateway_ip': '10.0.2.1',
'host_routes': hostroutes}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_create_subnet_defaults(self):
gateway = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
enable_dhcp = True
subnet = self._test_create_subnet()
# verify cidr & gw have been correctly generated
self.assertEqual(subnet['subnet']['cidr'], cidr)
self.assertEqual(subnet['subnet']['gateway_ip'], gateway)
self.assertEqual(subnet['subnet']['enable_dhcp'], enable_dhcp)
self.assertEqual(subnet['subnet']['allocation_pools'],
allocation_pools)
def test_create_subnet_gw_values(self):
cidr = '10.0.0.0/24'
# Gateway is last IP in range
gateway = '10.0.0.254'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.253'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected, gateway_ip=gateway)
# Gateway is first in subnet
gateway = '10.0.0.1'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected,
gateway_ip=gateway)
def test_create_subnet_ipv6_gw_values(self):
cidr = '2001::/64'
# Gateway is last IP in IPv6 DHCPv6 stateful subnet
gateway = '2001::ffff:ffff:ffff:ffff'
allocation_pools = [{'start': '2001::1',
'end': '2001::ffff:ffff:ffff:fffe'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected, gateway_ip=gateway,
cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
# Gateway is first IP in IPv6 DHCPv6 stateful subnet
gateway = '2001::1'
allocation_pools = [{'start': '2001::2',
'end': '2001::ffff:ffff:ffff:ffff'}]
expected = {'gateway_ip': gateway,
'cidr': cidr,
'allocation_pools': allocation_pools}
self._test_create_subnet(expected=expected, gateway_ip=gateway,
cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
# If gateway_ip is not specified, allocate first IP from the subnet
expected = {'gateway_ip': gateway,
'cidr': cidr}
self._test_create_subnet(expected=expected,
cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC)
def test_create_subnet_gw_outside_cidr_returns_400(self):
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPClientError.code,
gateway_ip='100.0.0.1')
def test_create_subnet_gw_of_network_returns_400(self):
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPClientError.code,
gateway_ip='10.0.0.0')
def test_create_subnet_gw_bcast_returns_400(self):
with self.network() as network:
self._create_subnet(self.fmt,
network['network']['id'],
'10.0.0.0/24',
webob.exc.HTTPClientError.code,
gateway_ip='10.0.0.255')
def test_create_subnet_with_allocation_pool(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_none_gateway(self):
cidr = '10.0.0.0/24'
self._test_create_subnet(gateway_ip=None,
cidr=cidr)
def test_create_subnet_with_none_gateway_fully_allocated(self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.254'}]
self._test_create_subnet(gateway_ip=None,
cidr=cidr,
allocation_pools=allocation_pools)
def test_subnet_with_allocation_range(self):
with self.network() as network:
net_id = network['network']['id']
data = {'subnet': {'network_id': net_id,
'cidr': '10.0.0.0/24',
'ip_version': 4,
'gateway_ip': '10.0.0.1',
'tenant_id': network['network']['tenant_id'],
'allocation_pools': [{'start': '10.0.0.100',
'end': '10.0.0.120'}]}}
subnet_req = self.new_create_request('subnets', data)
subnet = self.deserialize(self.fmt,
subnet_req.get_response(self.api))
# Check fixed IP not in allocation range
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.10'}]}
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
port = self.deserialize(self.fmt, res)
# delete the port
self._delete('ports', port['port']['id'])
# Check when fixed IP is gateway
kwargs = {"fixed_ips": [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.1'}]}
res = self._create_port(self.fmt, net_id=net_id, **kwargs)
self.assertEqual(res.status_int, webob.exc.HTTPCreated.code)
port = self.deserialize(self.fmt, res)
# delete the port
self._delete('ports', port['port']['id'])
def test_create_subnet_with_none_gateway_allocation_pool(self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
self._test_create_subnet(gateway_ip=None,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_v6_allocation_pool(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::/80'
allocation_pools = [{'start': 'fe80::2',
'end': 'fe80::ffff:fffa:ffff'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr, ip_version=6,
allocation_pools=allocation_pools)
def test_create_subnet_with_large_allocation_pool(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/8'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'},
{'start': '10.1.0.0',
'end': '10.200.0.100'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_multiple_allocation_pools(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'},
{'start': '10.0.0.110',
'end': '10.0.0.150'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
def test_create_subnet_with_dhcp_disabled(self):
enable_dhcp = False
self._test_create_subnet(enable_dhcp=enable_dhcp)
def test_create_subnet_default_gw_conflict_allocation_pool_returns_409(
self):
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.5'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPConflict.code)
def test_create_subnet_gateway_in_allocation_pool_returns_409(self):
gateway_ip = '10.0.0.50'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.1',
'end': '10.0.0.100'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPConflict.code)
def test_create_subnet_overlapping_allocation_pools_returns_409(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.150'},
{'start': '10.0.0.140',
'end': '10.0.0.180'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPConflict.code)
def test_create_subnet_invalid_allocation_pool_returns_400(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.256'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_out_of_range_allocation_pool_returns_400(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.1.6'}]
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_shared_returns_400(self):
cidr = '10.0.0.0/24'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(cidr=cidr,
shared=True)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv6_cidrv4(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 6,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_cidrv6(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::0/80',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_gatewayv6(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'gateway_ip': 'fe80::1',
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv6_gatewayv4(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::0/80',
'ip_version': 6,
'gateway_ip': '192.168.0.1',
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv6_dns_v4(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'fe80::0/80',
'ip_version': 6,
'dns_nameservers': ['192.168.0.1'],
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_hostroute_dst_v6(self):
host_routes = [{'destination': 'fe80::0/48',
'nexthop': '10.0.2.20'}]
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'host_routes': host_routes,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_inconsistent_ipv4_hostroute_np_v6(self):
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': 'fe80::1'}]
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'host_routes': host_routes,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def _test_validate_subnet_ipv6_modes(self, cur_subnet=None,
expect_success=True, **modes):
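        # Build a baseline IPv6 subnet dict, apply the requested ra/address
        # mode overrides, then either expect validation to pass or expect
        # InvalidInput to be raised.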
plugin = manager.NeutronManager.get_plugin()
ctx = context.get_admin_context()
new_subnet = {'ip_version': 6,
'cidr': 'fe80::/64',
'enable_dhcp': True,
'ipv6_address_mode': None,
'ipv6_ra_mode': None}
for mode, value in modes.items():
new_subnet[mode] = value
if expect_success:
plugin._validate_subnet(ctx, new_subnet, cur_subnet)
else:
self.assertRaises(n_exc.InvalidInput, plugin._validate_subnet,
ctx, new_subnet, cur_subnet)
def test_create_subnet_ipv6_ra_modes(self):
# Test all RA modes with no address mode specified
for ra_mode in constants.IPV6_MODES:
self._test_validate_subnet_ipv6_modes(
ipv6_ra_mode=ra_mode)
def test_create_subnet_ipv6_addr_modes(self):
# Test all address modes with no RA mode specified
for addr_mode in constants.IPV6_MODES:
self._test_validate_subnet_ipv6_modes(
ipv6_address_mode=addr_mode)
def test_create_subnet_ipv6_same_ra_and_addr_modes(self):
# Test all ipv6 modes with ra_mode==addr_mode
for ipv6_mode in constants.IPV6_MODES:
self._test_validate_subnet_ipv6_modes(
ipv6_ra_mode=ipv6_mode,
ipv6_address_mode=ipv6_mode)
def test_create_subnet_ipv6_different_ra_and_addr_modes(self):
# Test all ipv6 modes with ra_mode!=addr_mode
for ra_mode, addr_mode in itertools.permutations(
constants.IPV6_MODES, 2):
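            # Differing (non-None) ra and address modes are invalid, so
            # validation is expected to fail for every permutation.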
self._test_validate_subnet_ipv6_modes(
expect_success=not (ra_mode and addr_mode),
ipv6_ra_mode=ra_mode,
ipv6_address_mode=addr_mode)
def test_create_subnet_ipv6_out_of_cidr_global(self):
gateway_ip = '2000::1'
cidr = '2001::/64'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(
gateway_ip=gateway_ip, cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.DHCPV6_STATEFUL,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_ipv6_out_of_cidr_lla(self):
gateway_ip = 'fe80::1'
cidr = '2001::/64'
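        # A link-local gateway outside the subnet CIDR is allowed for SLAAC,
        # so this creation is expected to succeed.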
self._test_create_subnet(
gateway_ip=gateway_ip, cidr=cidr, ip_version=6,
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC)
def test_create_subnet_ipv6_attributes_no_dhcp_enabled(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::/64'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
for mode in constants.IPV6_MODES:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr, ip_version=6,
enable_dhcp=False,
ipv6_ra_mode=mode,
ipv6_address_mode=mode)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_invalid_ipv6_ra_mode(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::/80'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr, ip_version=6,
ipv6_ra_mode='foo',
ipv6_address_mode='slaac')
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_invalid_ipv6_address_mode(self):
gateway_ip = 'fe80::1'
cidr = 'fe80::/80'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr, ip_version=6,
ipv6_ra_mode='slaac',
ipv6_address_mode='baz')
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_ipv6_ra_mode_ip_version_4(self):
cidr = '10.0.2.0/24'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(cidr=cidr, ip_version=4,
ipv6_ra_mode=constants.DHCPV6_STATEFUL)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def test_create_subnet_ipv6_address_mode_ip_version_4(self):
cidr = '10.0.2.0/24'
with testlib_api.ExpectedException(
webob.exc.HTTPClientError) as ctx_manager:
self._test_create_subnet(
cidr=cidr, ip_version=4,
ipv6_address_mode=constants.DHCPV6_STATEFUL)
self.assertEqual(ctx_manager.exception.code,
webob.exc.HTTPClientError.code)
def _test_create_subnet_ipv6_auto_addr_with_port_on_network(
self, addr_mode, device_owner=DEVICE_OWNER_COMPUTE,
insert_db_reference_error=False):
# Create a network with one IPv4 subnet and one port
with self.network() as network,\
self.subnet(network=network) as v4_subnet,\
self.port(subnet=v4_subnet, device_owner=device_owner) as port:
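            # Optionally simulate a DBReferenceError when the IPAllocation
            # row for the port update is added to the session.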
if insert_db_reference_error:
def db_ref_err_for_ipalloc(instance):
if instance.__class__.__name__ == 'IPAllocation':
raise db_exc.DBReferenceError(
'dummy_table', 'dummy_constraint',
'dummy_key', 'dummy_key_table')
mock.patch.object(orm.Session, 'add',
side_effect=db_ref_err_for_ipalloc).start()
# Add an IPv6 auto-address subnet to the network
v6_subnet = self._make_subnet(self.fmt, network, 'fe80::1',
'fe80::/64', ip_version=6,
ipv6_ra_mode=addr_mode,
ipv6_address_mode=addr_mode)
if (insert_db_reference_error
or device_owner == constants.DEVICE_OWNER_ROUTER_SNAT
or device_owner in constants.ROUTER_INTERFACE_OWNERS):
# DVR SNAT and router interfaces should not have been
# updated with addresses from the new auto-address subnet
self.assertEqual(1, len(port['port']['fixed_ips']))
else:
# Confirm that the port has been updated with an address
# from the new auto-address subnet
req = self.new_show_request('ports', port['port']['id'],
self.fmt)
sport = self.deserialize(self.fmt, req.get_response(self.api))
fixed_ips = sport['port']['fixed_ips']
self.assertEqual(2, len(fixed_ips))
self.assertIn(v6_subnet['subnet']['id'],
[fixed_ip['subnet_id'] for fixed_ip
in fixed_ips])
def test_create_subnet_ipv6_slaac_with_port_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC)
def test_create_subnet_dhcpv6_stateless_with_port_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.DHCPV6_STATELESS)
def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC,
device_owner=constants.DEVICE_OWNER_DHCP)
def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC,
device_owner=constants.DEVICE_OWNER_ROUTER_INTF)
def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC,
device_owner=constants.DEVICE_OWNER_ROUTER_SNAT)
def test_create_subnet_ipv6_slaac_with_db_reference_error(self):
self._test_create_subnet_ipv6_auto_addr_with_port_on_network(
constants.IPV6_SLAAC, insert_db_reference_error=True)
def test_update_subnet_no_gateway(self):
with self.subnet() as subnet:
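            # Set a gateway via one update, then clear it with a second
            # update and confirm it comes back as None.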
data = {'subnet': {'gateway_ip': '10.0.0.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['gateway_ip'],
data['subnet']['gateway_ip'])
data = {'subnet': {'gateway_ip': None}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertIsNone(res['subnet']['gateway_ip'])
def test_update_subnet(self):
with self.subnet() as subnet:
data = {'subnet': {'gateway_ip': '10.0.0.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['gateway_ip'],
data['subnet']['gateway_ip'])
def test_update_subnet_adding_additional_host_routes_and_dns(self):
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'}]
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'dns_nameservers': ['192.168.0.1'],
'host_routes': host_routes,
'tenant_id': network['network']['tenant_id']}}
subnet_req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, subnet_req.get_response(self.api))
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': '10.0.2.2'},
{'destination': '192.168.0.0/24',
'nexthop': '10.0.2.3'}]
dns_nameservers = ['192.168.0.1', '192.168.0.2']
data = {'subnet': {'host_routes': host_routes,
'dns_nameservers': dns_nameservers}}
req = self.new_update_request('subnets', data,
res['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(sorted(res['subnet']['host_routes']),
sorted(host_routes))
self.assertEqual(sorted(res['subnet']['dns_nameservers']),
sorted(dns_nameservers))
def test_update_subnet_shared_returns_400(self):
with self.network(shared=True) as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'shared': True}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_gw_outside_cidr_returns_400(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'gateway_ip': '100.0.0.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_gw_ip_in_use_returns_409(self):
with self.network() as network:
with self.subnet(
network=network,
allocation_pools=[{'start': '10.0.0.100',
'end': '10.0.0.253'}]) as subnet:
subnet_data = subnet['subnet']
with self.port(
subnet=subnet,
fixed_ips=[{'subnet_id': subnet_data['id'],
'ip_address': subnet_data['gateway_ip']}]):
data = {'subnet': {'gateway_ip': '10.0.0.99'}}
req = self.new_update_request('subnets', data,
subnet_data['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 409)
def test_update_subnet_inconsistent_ipv4_gatewayv6(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'gateway_ip': 'fe80::1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
with self.network() as network:
with self.subnet(network=network,
ip_version=6, cidr='fe80::/48') as subnet:
data = {'subnet': {'gateway_ip': '10.1.1.1'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv4_dns_v6(self):
dns_nameservers = ['fe80::1']
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'dns_nameservers': dns_nameservers}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
host_routes = [{'destination': 'fe80::0/48',
'nexthop': '10.0.2.20'}]
with self.network() as network:
with self.subnet(network=network,
ip_version=6, cidr='fe80::/48') as subnet:
data = {'subnet': {'host_routes': host_routes}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
host_routes = [{'destination': '172.16.0.0/24',
'nexthop': 'fe80::1'}]
with self.network() as network:
with self.subnet(network=network,
ip_version=6, cidr='fe80::/48') as subnet:
data = {'subnet': {'host_routes': host_routes}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_gateway_in_allocation_pool_returns_409(self):
allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.254'}]
with self.network() as network:
with self.subnet(network=network,
allocation_pools=allocation_pools,
cidr='10.0.0.0/24') as subnet:
data = {'subnet': {'gateway_ip': '10.0.0.50'}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPConflict.code)
def test_update_subnet_ipv6_attributes_fails(self):
with self.subnet(ip_version=6, cidr='fe80::/64',
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL,
'ipv6_address_mode': constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_ra_mode_fails(self):
with self.subnet(ip_version=6, cidr='fe80::/64',
ipv6_ra_mode=constants.IPV6_SLAAC) as subnet:
data = {'subnet': {'ipv6_ra_mode': constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_address_mode_fails(self):
with self.subnet(ip_version=6, cidr='fe80::/64',
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
data = {'subnet': {'ipv6_address_mode': constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_cannot_disable_dhcp(self):
with self.subnet(ip_version=6, cidr='fe80::/64',
ipv6_ra_mode=constants.IPV6_SLAAC,
ipv6_address_mode=constants.IPV6_SLAAC) as subnet:
data = {'subnet': {'enable_dhcp': False}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_ra_mode_ip_version_4(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'ipv6_ra_mode':
constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def test_update_subnet_ipv6_address_mode_ip_version_4(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'subnet': {'ipv6_address_mode':
constants.DHCPV6_STATEFUL}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def _test_update_subnet_allocation_pools(self, with_gateway_ip=False):
"""Test that we can successfully update with sane params.
This will create a subnet with specified allocation_pools
Then issue an update (PUT) to update these using correct
(i.e. non erroneous) params. Finally retrieve the updated
subnet and verify.
"""
allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
with self.network() as network:
with self.subnet(network=network,
allocation_pools=allocation_pools,
cidr='192.168.0.0/24') as subnet:
data = {'subnet': {'allocation_pools': [
{'start': '192.168.0.10', 'end': '192.168.0.20'},
{'start': '192.168.0.30', 'end': '192.168.0.40'}]}}
if with_gateway_ip:
data['subnet']['gateway_ip'] = '192.168.0.9'
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
                # Check the response code, then GET the subnet for verification
                res = req.get_response(self.api)
                self.assertEqual(res.status_int, 200)
req = self.new_show_request('subnets', subnet['subnet']['id'],
self.fmt)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(len(res['subnet']['allocation_pools']), 2)
res_vals = (
list(res['subnet']['allocation_pools'][0].values()) +
list(res['subnet']['allocation_pools'][1].values())
)
                for pool_val in ['10', '20', '30', '40']:
                    self.assertIn('192.168.0.%s' % pool_val, res_vals)
if with_gateway_ip:
self.assertEqual((res['subnet']['gateway_ip']),
'192.168.0.9')
def test_update_subnet_allocation_pools(self):
self._test_update_subnet_allocation_pools()
def test_update_subnet_allocation_pools_and_gateway_ip(self):
self._test_update_subnet_allocation_pools(with_gateway_ip=True)
    # Updating alloc pool to something outside subnet.cidr
def test_update_subnet_allocation_pools_invalid_pool_for_cidr(self):
"""Test update alloc pool to something outside subnet.cidr.
This makes sure that an erroneous allocation_pool specified
in a subnet update (outside subnet cidr) will result in an error.
"""
allocation_pools = [{'start': '192.168.0.2', 'end': '192.168.0.254'}]
with self.network() as network:
with self.subnet(network=network,
allocation_pools=allocation_pools,
cidr='192.168.0.0/24') as subnet:
data = {'subnet': {'allocation_pools': [
{'start': '10.0.0.10', 'end': '10.0.0.20'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPClientError.code)
def _test_subnet_update_enable_dhcp_no_ip_available_returns_409(
self, allocation_pools, cidr):
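        # Create a subnet with DHCP disabled, consume the given allocation
        # pool with a port, then expect a 409 conflict when enabling DHCP
        # because no address remains for the DHCP port.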
ip_version = netaddr.IPNetwork(cidr).version
with self.network() as network:
with self.subnet(network=network,
allocation_pools=allocation_pools,
enable_dhcp=False,
cidr=cidr,
ip_version=ip_version) as subnet:
                net_id = subnet['subnet']['network_id']
                self._create_port(self.fmt, net_id)
data = {'subnet': {'enable_dhcp': True}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int,
webob.exc.HTTPConflict.code)
def test_subnet_update_enable_dhcp_no_ip_available_returns_409_ipv4(self):
allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.2'}]
cidr = '10.0.0.0/30'
self._test_subnet_update_enable_dhcp_no_ip_available_returns_409(
allocation_pools, cidr)
def test_subnet_update_enable_dhcp_no_ip_available_returns_409_ipv6(self):
allocation_pools = [{'start': '2001:db8::2', 'end': '2001:db8::2'}]
cidr = '2001:db8::/126'
self._test_subnet_update_enable_dhcp_no_ip_available_returns_409(
allocation_pools, cidr)
def test_show_subnet(self):
with self.network() as network:
with self.subnet(network=network) as subnet:
req = self.new_show_request('subnets',
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['id'],
subnet['subnet']['id'])
self.assertEqual(res['subnet']['network_id'],
network['network']['id'])
def test_list_subnets(self):
with self.network() as network:
with self.subnet(network=network,
gateway_ip='10.0.0.1',
cidr='10.0.0.0/24') as v1,\
self.subnet(network=network,
gateway_ip='10.0.1.1',
cidr='10.0.1.0/24') as v2,\
self.subnet(network=network,
gateway_ip='10.0.2.1',
cidr='10.0.2.0/24') as v3:
subnets = (v1, v2, v3)
self._test_list_resources('subnet', subnets)
def test_list_subnets_shared(self):
with self.network(shared=True) as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
with self.subnet(cidr='10.0.1.0/24') as priv_subnet:
# normal user should see only 1 subnet
req = self.new_list_request('subnets')
req.environ['neutron.context'] = context.Context(
'', 'some_tenant')
res = self.deserialize(self.fmt,
req.get_response(self.api))
self.assertEqual(len(res['subnets']), 1)
self.assertEqual(res['subnets'][0]['cidr'],
subnet['subnet']['cidr'])
# admin will see both subnets
admin_req = self.new_list_request('subnets')
admin_res = self.deserialize(
self.fmt, admin_req.get_response(self.api))
self.assertEqual(len(admin_res['subnets']), 2)
cidrs = [sub['cidr'] for sub in admin_res['subnets']]
self.assertIn(subnet['subnet']['cidr'], cidrs)
self.assertIn(priv_subnet['subnet']['cidr'], cidrs)
def test_list_subnets_with_parameter(self):
with self.network() as network:
with self.subnet(network=network,
gateway_ip='10.0.0.1',
cidr='10.0.0.0/24') as v1,\
self.subnet(network=network,
gateway_ip='10.0.1.1',
cidr='10.0.1.0/24') as v2:
subnets = (v1, v2)
query_params = 'ip_version=4&ip_version=6'
self._test_list_resources('subnet', subnets,
query_params=query_params)
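                # Both subnets are IPv4, so filtering on ip_version=6 alone
                # should return an empty list.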
query_params = 'ip_version=6'
self._test_list_resources('subnet', [],
query_params=query_params)
def test_list_subnets_with_sort_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
self.subnet(enable_dhcp=False, cidr='11.0.0.0/24') as subnet2,\
self.subnet(enable_dhcp=False, cidr='12.0.0.0/24') as subnet3:
self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1),
[('enable_dhcp', 'asc'),
('cidr', 'desc')])
def test_list_subnets_with_sort_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\
self.subnet(enable_dhcp=False, cidr='11.0.0.0/24') as subnet2,\
self.subnet(enable_dhcp=False, cidr='12.0.0.0/24') as subnet3:
self._test_list_with_sort('subnet', (subnet3,
subnet2,
subnet1),
[('enable_dhcp', 'asc'),
('cidr', 'desc')])
def test_list_subnets_with_pagination_native(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented sorting feature")
with self.subnet(cidr='10.0.0.0/24') as subnet1,\
self.subnet(cidr='11.0.0.0/24') as subnet2,\
self.subnet(cidr='12.0.0.0/24') as subnet3:
self._test_list_with_pagination('subnet',
(subnet1, subnet2, subnet3),
('cidr', 'asc'), 2, 2)
def test_list_subnets_with_pagination_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
with self.subnet(cidr='10.0.0.0/24') as subnet1,\
self.subnet(cidr='11.0.0.0/24') as subnet2,\
self.subnet(cidr='12.0.0.0/24') as subnet3:
self._test_list_with_pagination('subnet',
(subnet1, subnet2, subnet3),
('cidr', 'asc'), 2, 2)
def test_list_subnets_with_pagination_reverse_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with self.subnet(cidr='10.0.0.0/24') as subnet1,\
self.subnet(cidr='11.0.0.0/24') as subnet2,\
self.subnet(cidr='12.0.0.0/24') as subnet3:
self._test_list_with_pagination_reverse('subnet',
(subnet1, subnet2,
subnet3),
('cidr', 'asc'), 2, 2)
def test_list_subnets_with_pagination_reverse_emulated(self):
helper_patcher = mock.patch(
'neutron.api.v2.base.Controller._get_pagination_helper',
new=_fake_get_pagination_helper)
helper_patcher.start()
with self.subnet(cidr='10.0.0.0/24') as subnet1,\
self.subnet(cidr='11.0.0.0/24') as subnet2,\
self.subnet(cidr='12.0.0.0/24') as subnet3:
self._test_list_with_pagination_reverse('subnet',
(subnet1, subnet2,
subnet3),
('cidr', 'asc'), 2, 2)
def test_invalid_ip_version(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 7,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_subnet(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': 'invalid',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_ip_address(self):
with self.network() as network:
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': 'ipaddress'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_invalid_uuid(self):
with self.network() as network:
data = {'subnet': {'network_id': 'invalid-uuid',
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.0.1'}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_with_one_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
dns_nameservers = ['1.2.3.4']
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
dns_nameservers=dns_nameservers)
def test_create_subnet_with_two_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
dns_nameservers = ['1.2.3.4', '4.3.2.1']
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
dns_nameservers=dns_nameservers)
def test_create_subnet_with_too_many_dns(self):
with self.network() as network:
dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.0.1',
'dns_nameservers': dns_list}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_create_subnet_with_one_host_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
host_routes=host_routes)
def test_create_subnet_with_two_host_routes(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.100'}]
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'},
{'destination': '12.0.0.0/8',
'nexthop': '4.3.2.1'}]
self._test_create_subnet(gateway_ip=gateway_ip,
cidr=cidr,
allocation_pools=allocation_pools,
host_routes=host_routes)
def test_create_subnet_with_too_many_routes(self):
with self.network() as network:
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'},
{'destination': '12.0.0.0/8',
'nexthop': '4.3.2.1'},
{'destination': '141.212.0.0/16',
'nexthop': '2.2.2.2'}]
data = {'subnet': {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.0.1',
'host_routes': host_routes}}
subnet_req = self.new_create_request('subnets', data)
res = subnet_req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_update_subnet_dns(self):
with self.subnet() as subnet:
data = {'subnet': {'dns_nameservers': ['11.0.0.1']}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['dns_nameservers'],
data['subnet']['dns_nameservers'])
def test_update_subnet_dns_to_None(self):
with self.subnet(dns_nameservers=['11.0.0.1']) as subnet:
data = {'subnet': {'dns_nameservers': None}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual([], res['subnet']['dns_nameservers'])
data = {'subnet': {'dns_nameservers': ['11.0.0.3']}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(data['subnet']['dns_nameservers'],
res['subnet']['dns_nameservers'])
def test_update_subnet_dns_with_too_many_entries(self):
with self.subnet() as subnet:
dns_list = ['1.1.1.1', '2.2.2.2', '3.3.3.3']
data = {'subnet': {'dns_nameservers': dns_list}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_update_subnet_route(self):
with self.subnet() as subnet:
data = {'subnet': {'host_routes':
[{'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['host_routes'],
data['subnet']['host_routes'])
def test_update_subnet_route_to_None(self):
with self.subnet(host_routes=[{'destination': '12.0.0.0/8',
'nexthop': '1.2.3.4'}]) as subnet:
data = {'subnet': {'host_routes': None}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual([], res['subnet']['host_routes'])
data = {'subnet': {'host_routes': [{'destination': '12.0.0.0/8',
'nexthop': '1.2.3.4'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(data['subnet']['host_routes'],
res['subnet']['host_routes'])
def test_update_subnet_route_with_too_many_entries(self):
with self.subnet() as subnet:
data = {'subnet': {'host_routes': [
{'destination': '12.0.0.0/8', 'nexthop': '1.2.3.4'},
{'destination': '13.0.0.0/8', 'nexthop': '1.2.3.5'},
{'destination': '14.0.0.0/8', 'nexthop': '1.2.3.6'}]}}
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPClientError.code)
def test_delete_subnet_with_dns(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
dns_nameservers = ['1.2.3.4']
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4,
dns_nameservers=dns_nameservers)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_with_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4,
host_routes=host_routes)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_with_dns_and_route(self):
gateway_ip = '10.0.0.1'
cidr = '10.0.0.0/24'
dns_nameservers = ['1.2.3.4']
host_routes = [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]
# Create new network
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
subnet = self._make_subnet(self.fmt, network, gateway_ip,
cidr, ip_version=4,
dns_nameservers=dns_nameservers,
host_routes=host_routes)
req = self.new_delete_request('subnets', subnet['subnet']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
def test_delete_subnet_with_callback(self):
with self.subnet() as subnet,\
mock.patch.object(registry, 'notify') as notify:
errors = [
exceptions.NotificationError(
'fake_id', n_exc.NeutronException()),
]
notify.side_effect = [
exceptions.CallbackFailure(errors=errors), None
]
# Make sure the delete request fails
delete_request = self.new_delete_request('subnets',
subnet['subnet']['id'])
delete_response = delete_request.get_response(self.api)
            self.assertIn('NeutronError', delete_response.json)
self.assertEqual('SubnetInUse',
delete_response.json['NeutronError']['type'])
# Make sure the subnet wasn't deleted
list_request = self.new_list_request(
'subnets', params="id=%s" % subnet['subnet']['id'])
list_response = list_request.get_response(self.api)
self.assertEqual(subnet['subnet']['id'],
list_response.json['subnets'][0]['id'])
def _helper_test_validate_subnet(self, option, exception):
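        # Force the configured limit (e.g. max_dns_nameservers) to 0 so that
        # validating a subnet with one nameserver and one host route raises
        # the expected "exhausted" exception.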
cfg.CONF.set_override(option, 0)
with self.network() as network:
subnet = {'network_id': network['network']['id'],
'cidr': '10.0.2.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id'],
'gateway_ip': '10.0.2.1',
'dns_nameservers': ['8.8.8.8'],
'host_routes': [{'destination': '135.207.0.0/16',
'nexthop': '1.2.3.4'}]}
plugin = manager.NeutronManager.get_plugin()
e = self.assertRaises(exception,
plugin._validate_subnet,
context.get_admin_context(),
subnet)
self.assertThat(
str(e),
matchers.Not(matchers.Contains('built-in function id')))
def test_validate_subnet_dns_nameservers_exhausted(self):
self._helper_test_validate_subnet(
'max_dns_nameservers',
n_exc.DNSNameServersExhausted)
def test_validate_subnet_host_routes_exhausted(self):
self._helper_test_validate_subnet(
'max_subnet_host_routes',
n_exc.HostRoutesExhausted)
def test_port_prevents_network_deletion(self):
with self.port() as p:
self._delete('networks', p['port']['network_id'],
expected_code=webob.exc.HTTPConflict.code)
def test_port_prevents_subnet_deletion(self):
with self.port() as p:
self._delete('subnets', p['port']['fixed_ips'][0]['subnet_id'],
expected_code=webob.exc.HTTPConflict.code)
class TestSubnetPoolsV2(NeutronDbPluginV2TestCase):
_POOL_NAME = 'test-pool'
def _test_create_subnetpool(self, prefixes, expected=None,
admin=False, **kwargs):
keys = kwargs.copy()
keys.setdefault('tenant_id', self._tenant_id)
with self.subnetpool(prefixes, admin, **keys) as subnetpool:
self._validate_resource(subnetpool, keys, 'subnetpool')
if expected:
self._compare_resource(subnetpool, expected, 'subnetpool')
return subnetpool
def _validate_default_prefix(self, prefix, subnetpool):
self.assertEqual(subnetpool['subnetpool']['default_prefixlen'], prefix)
def _validate_min_prefix(self, prefix, subnetpool):
self.assertEqual(subnetpool['subnetpool']['min_prefixlen'], prefix)
def _validate_max_prefix(self, prefix, subnetpool):
self.assertEqual(subnetpool['subnetpool']['max_prefixlen'], prefix)
def test_create_subnetpool_empty_prefix_list(self):
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[],
name=self._POOL_NAME,
tenant_id=self._tenant_id,
min_prefixlen='21')
def test_create_subnetpool_ipv4_24_with_defaults(self):
subnet = netaddr.IPNetwork('10.10.10.0/24')
subnetpool = self._test_create_subnetpool([subnet.cidr],
name=self._POOL_NAME,
tenant_id=self._tenant_id,
min_prefixlen='21')
self._validate_default_prefix('21', subnetpool)
self._validate_min_prefix('21', subnetpool)
def test_create_subnetpool_ipv4_21_with_defaults(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
subnetpool = self._test_create_subnetpool([subnet.cidr],
name=self._POOL_NAME,
tenant_id=self._tenant_id,
min_prefixlen='21')
self._validate_default_prefix('21', subnetpool)
self._validate_min_prefix('21', subnetpool)
def test_create_subnetpool_ipv4_default_prefix_too_small(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
default_prefixlen='20')
def test_create_subnetpool_ipv4_default_prefix_too_large(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
max_prefixlen=24,
default_prefixlen='32')
def test_create_subnetpool_ipv4_default_prefix_bounds(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME)
self._validate_min_prefix('8', subnetpool)
self._validate_default_prefix('8', subnetpool)
self._validate_max_prefix('32', subnetpool)
def test_create_subnetpool_ipv6_default_prefix_bounds(self):
subnet = netaddr.IPNetwork('fe80::/48')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME)
self._validate_min_prefix('64', subnetpool)
self._validate_default_prefix('64', subnetpool)
self._validate_max_prefix('128', subnetpool)
def test_create_subnetpool_ipv4_supported_default_prefix(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
default_prefixlen='26')
self._validate_default_prefix('26', subnetpool)
def test_create_subnetpool_ipv4_supported_min_prefix(self):
subnet = netaddr.IPNetwork('10.10.10.0/24')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='26')
self._validate_min_prefix('26', subnetpool)
self._validate_default_prefix('26', subnetpool)
def test_create_subnetpool_ipv4_default_prefix_smaller_than_min(self):
subnet = netaddr.IPNetwork('10.10.10.0/21')
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
default_prefixlen='22',
min_prefixlen='23')
def test_create_subnetpool_mixed_ip_version(self):
subnet_v4 = netaddr.IPNetwork('10.10.10.0/21')
subnet_v6 = netaddr.IPNetwork('fe80::/48')
self.assertRaises(webob.exc.HTTPClientError,
self._test_create_subnetpool,
[subnet_v4.cidr, subnet_v6.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
def test_create_subnetpool_ipv6_with_defaults(self):
subnet = netaddr.IPNetwork('fe80::/48')
subnetpool = self._test_create_subnetpool([subnet.cidr],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='48')
self._validate_default_prefix('48', subnetpool)
self._validate_min_prefix('48', subnetpool)
def test_get_subnetpool(self):
subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
req = self.new_show_request('subnetpools',
subnetpool['subnetpool']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(subnetpool['subnetpool']['id'],
res['subnetpool']['id'])
def test_get_subnetpool_different_tenants_not_shared(self):
subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
shared=False,
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
req = self.new_show_request('subnetpools',
subnetpool['subnetpool']['id'])
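        # A request from a different tenant must not see the unshared pool.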
neutron_context = context.Context('', 'not-the-owner')
req.environ['neutron.context'] = neutron_context
res = req.get_response(self.api)
self.assertEqual(res.status_int, 404)
def test_get_subnetpool_different_tenants_shared(self):
subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
None,
True,
name=self._POOL_NAME,
min_prefixlen='24',
shared=True)
req = self.new_show_request('subnetpools',
subnetpool['subnetpool']['id'])
neutron_context = context.Context('', self._tenant_id)
req.environ['neutron.context'] = neutron_context
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(subnetpool['subnetpool']['id'],
res['subnetpool']['id'])
def test_list_subnetpools_different_tenants_shared(self):
self._test_create_subnetpool(['10.10.10.0/24'],
None,
True,
name=self._POOL_NAME,
min_prefixlen='24',
shared=True)
admin_res = self._list('subnetpools')
mortal_res = self._list('subnetpools',
neutron_context=context.Context('', 'not-the-owner'))
self.assertEqual(len(admin_res['subnetpools']), 1)
self.assertEqual(len(mortal_res['subnetpools']), 1)
def test_list_subnetpools_different_tenants_not_shared(self):
self._test_create_subnetpool(['10.10.10.0/24'],
None,
True,
name=self._POOL_NAME,
min_prefixlen='24',
shared=False)
admin_res = self._list('subnetpools')
mortal_res = self._list('subnetpools',
neutron_context=context.Context('', 'not-the-owner'))
self.assertEqual(len(admin_res['subnetpools']), 1)
self.assertEqual(len(mortal_res['subnetpools']), 0)
def test_delete_subnetpool(self):
subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
req = self.new_delete_request('subnetpools',
subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 204)
def test_delete_nonexistent_subnetpool(self):
req = self.new_delete_request('subnetpools',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
res = req.get_response(self._api_for_resource('subnetpools'))
self.assertEqual(res.status_int, 404)
def test_update_subnetpool_prefix_list_append(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.8.0/21'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'prefixes': ['10.10.8.0/21', '3.3.3.0/24',
'2.2.2.0/24']}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
api = self._api_for_resource('subnetpools')
res = self.deserialize(self.fmt, req.get_response(api))
self.assertItemsEqual(res['subnetpool']['prefixes'],
['10.10.8.0/21', '3.3.3.0/24', '2.2.2.0/24'])
def test_update_subnetpool_prefix_list_compaction(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'prefixes': ['10.10.10.0/24',
'10.10.11.0/24']}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
api = self._api_for_resource('subnetpools')
res = self.deserialize(self.fmt, req.get_response(api))
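        # The two adjacent /24 prefixes should be compacted into a single /23.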
self.assertItemsEqual(res['subnetpool']['prefixes'],
['10.10.10.0/23'])
def test_illegal_subnetpool_prefix_list_update(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'prefixes': ['10.10.11.0/24']}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
api = self._api_for_resource('subnetpools')
res = req.get_response(api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_default_prefix(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.8.0/21'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'default_prefixlen': '26'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
api = self._api_for_resource('subnetpools')
res = self.deserialize(self.fmt, req.get_response(api))
self.assertEqual(res['subnetpool']['default_prefixlen'], 26)
def test_update_subnetpool_min_prefix(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'min_prefixlen': '21'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnetpool']['min_prefixlen'], 21)
def test_update_subnetpool_min_prefix_larger_than_max(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
max_prefixlen='24')
data = {'subnetpool': {'min_prefixlen': '28'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_max_prefix(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
max_prefixlen='24')
data = {'subnetpool': {'max_prefixlen': '26'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnetpool']['max_prefixlen'], 26)
def test_update_subnetpool_max_prefix_less_than_min(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'max_prefixlen': '21'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_max_prefix_less_than_default(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
default_prefixlen='24')
data = {'subnetpool': {'max_prefixlen': '22'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_default_prefix_less_than_min(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
data = {'subnetpool': {'default_prefixlen': '20'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_default_prefix_larger_than_max(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
max_prefixlen='24')
data = {'subnetpool': {'default_prefixlen': '28'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_prefix_list_mixed_ip_version(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24')
data = {'subnetpool': {'prefixes': ['fe80::/48']}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_update_subnetpool_default_quota(self):
initial_subnetpool = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='24',
default_quota=10)
self.assertEqual(initial_subnetpool['subnetpool']['default_quota'],
10)
data = {'subnetpool': {'default_quota': '1'}}
req = self.new_update_request('subnetpools', data,
initial_subnetpool['subnetpool']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnetpool']['default_quota'], 1)
def test_allocate_any_subnet_with_prefixlen(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a subnet allocation (no CIDR)
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'prefixlen': 24,
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
subnet = netaddr.IPNetwork(res['subnet']['cidr'])
self.assertEqual(subnet.prefixlen, 24)
# Assert the allocated subnet CIDR is a subnet of our pool prefix
supernet = netaddr.smallest_matching_cidr(
subnet,
sp['subnetpool']['prefixes'])
self.assertEqual(supernet, netaddr.IPNetwork('10.10.0.0/16'))
def test_allocate_any_subnet_with_default_prefixlen(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request any subnet allocation using default prefix
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
subnet = netaddr.IPNetwork(res['subnet']['cidr'])
self.assertEqual(subnet.prefixlen,
int(sp['subnetpool']['default_prefixlen']))
def test_allocate_specific_subnet_with_mismatch_prefixlen(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'prefixlen': 26,
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_specific_subnet_with_matching_prefixlen(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'prefixlen': 24,
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_specific_subnet(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
# Assert the allocated subnet CIDR is what we expect
subnet = netaddr.IPNetwork(res['subnet']['cidr'])
self.assertEqual(subnet, netaddr.IPNetwork('10.10.1.0/24'))
def test_allocate_specific_subnet_non_existent_prefix(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '192.168.1.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 500)
def test_allocate_specific_subnet_already_allocated(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.10.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
# Allocate the subnet
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
# Attempt to allocate it again
res = req.get_response(self.api)
# Assert error
self.assertEqual(res.status_int, 500)
def test_allocate_specific_subnet_prefix_too_small(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.0.0/20',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_specific_subnet_prefix_specific_gw(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'gateway_ip': '10.10.1.254',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['gateway_ip'], '10.10.1.254')
def test_allocate_specific_subnet_prefix_allocation_pools(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request a specific subnet allocation
pools = [{'start': '10.10.1.2',
'end': '10.10.1.253'}]
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.1.0/24',
'gateway_ip': '10.10.1.1',
'ip_version': 4,
'allocation_pools': pools,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['subnet']['allocation_pools'][0]['start'],
pools[0]['start'])
self.assertEqual(res['subnet']['allocation_pools'][0]['end'],
pools[0]['end'])
def test_allocate_any_subnet_prefix_allocation_pools(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.10.0/24'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
# Request an any subnet allocation
pools = [{'start': '10.10.10.1',
'end': '10.10.10.254'}]
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'prefixlen': '24',
'ip_version': 4,
'allocation_pools': pools,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_specific_subnet_prefix_too_large(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
max_prefixlen='21')
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.0.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_delete_subnetpool_existing_allocations(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21')
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'cidr': '10.10.0.0/24',
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
req.get_response(self.api)
req = self.new_delete_request('subnetpools',
sp['subnetpool']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
def test_allocate_subnet_over_quota(self):
with self.network() as network:
sp = self._test_create_subnetpool(['10.10.0.0/16'],
tenant_id=self._tenant_id,
name=self._POOL_NAME,
min_prefixlen='21',
default_quota=2048)
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'ip_version': 4,
'prefixlen': 21,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
# Allocate a subnet to fill the quota
res = req.get_response(self.api)
self.assertEqual(res.status_int, 201)
# Attempt to allocate a /21 again
res = req.get_response(self.api)
# Assert error
self.assertEqual(res.status_int, 409)
def test_allocate_any_ipv4_subnet_ipv6_pool(self):
with self.network() as network:
sp = self._test_create_subnetpool(['2001:db8:1:2::/63'],
tenant_id=self._tenant_id,
name=self._POOL_NAME)
# Request a specific subnet allocation
data = {'subnet': {'network_id': network['network']['id'],
'subnetpool_id': sp['subnetpool']['id'],
'ip_version': 4,
'tenant_id': network['network']['tenant_id']}}
req = self.new_create_request('subnets', data)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
class DbModelTestCase(base.BaseTestCase):
"""DB model tests."""
def test_repr(self):
"""testing the string representation of 'model' classes."""
network = models_v2.Network(name="net_net", status="OK",
admin_state_up=True)
actual_repr_output = repr(network)
exp_start_with = "<neutron.db.models_v2.Network"
exp_middle = "[object at %x]" % id(network)
exp_end_with = (" {tenant_id=None, id=None, "
"name='net_net', status='OK', "
"admin_state_up=True, shared=None, "
"mtu=None, vlan_transparent=None}>")
final_exp = exp_start_with + exp_middle + exp_end_with
self.assertEqual(actual_repr_output, final_exp)
class TestNeutronDbPluginV2(base.BaseTestCase):
"""Unit Tests for NeutronDbPluginV2 IPAM Logic."""
def test_generate_ip(self):
with mock.patch.object(non_ipam.IpamNonPluggableBackend,
'_try_generate_ip') as generate:
with mock.patch.object(non_ipam.IpamNonPluggableBackend,
'_rebuild_availability_ranges') as rebuild:
non_ipam.IpamNonPluggableBackend._generate_ip('c', 's')
generate.assert_called_once_with('c', 's')
self.assertEqual(0, rebuild.call_count)
def test_generate_ip_exhausted_pool(self):
with mock.patch.object(non_ipam.IpamNonPluggableBackend,
'_try_generate_ip') as generate:
with mock.patch.object(non_ipam.IpamNonPluggableBackend,
'_rebuild_availability_ranges') as rebuild:
exception = n_exc.IpAddressGenerationFailure(net_id='n')
# fail first call but not second
generate.side_effect = [exception, None]
non_ipam.IpamNonPluggableBackend._generate_ip('c', 's')
self.assertEqual(2, generate.call_count)
rebuild.assert_called_once_with('c', 's')
def _validate_rebuild_availability_ranges(self, pools, allocations,
expected):
ip_qry = mock.Mock()
ip_qry.with_lockmode.return_value = ip_qry
ip_qry.filter_by.return_value = allocations
pool_qry = mock.Mock()
pool_qry.options.return_value = pool_qry
pool_qry.with_lockmode.return_value = pool_qry
pool_qry.filter_by.return_value = pools
def return_queries_side_effect(*args, **kwargs):
if args[0] == models_v2.IPAllocation:
return ip_qry
if args[0] == models_v2.IPAllocationPool:
return pool_qry
context = mock.Mock()
context.session.query.side_effect = return_queries_side_effect
subnets = [mock.MagicMock()]
db_base_plugin_v2.NeutronDbPluginV2._rebuild_availability_ranges(
context, subnets)
actual = [[args[0].allocation_pool_id,
args[0].first_ip, args[0].last_ip]
for _name, args, _kwargs in context.session.add.mock_calls]
self.assertEqual(expected, actual)
def test_rebuild_availability_ranges(self):
pools = [{'id': 'a',
'first_ip': '192.168.1.3',
'last_ip': '192.168.1.10'},
{'id': 'b',
'first_ip': '192.168.1.100',
'last_ip': '192.168.1.120'}]
allocations = [{'ip_address': '192.168.1.3'},
{'ip_address': '192.168.1.78'},
{'ip_address': '192.168.1.7'},
{'ip_address': '192.168.1.110'},
{'ip_address': '192.168.1.11'},
{'ip_address': '192.168.1.4'},
{'ip_address': '192.168.1.111'}]
expected = [['a', '192.168.1.5', '192.168.1.6'],
['a', '192.168.1.8', '192.168.1.10'],
['b', '192.168.1.100', '192.168.1.109'],
['b', '192.168.1.112', '192.168.1.120']]
self._validate_rebuild_availability_ranges(pools, allocations,
expected)
def test_rebuild_ipv6_availability_ranges(self):
pools = [{'id': 'a',
'first_ip': '2001::1',
'last_ip': '2001::50'},
{'id': 'b',
'first_ip': '2001::100',
'last_ip': '2001::ffff:ffff:ffff:fffe'}]
allocations = [{'ip_address': '2001::10'},
{'ip_address': '2001::45'},
{'ip_address': '2001::60'},
{'ip_address': '2001::111'},
{'ip_address': '2001::200'},
{'ip_address': '2001::ffff:ffff:ffff:ff10'},
{'ip_address': '2001::ffff:ffff:ffff:f2f0'}]
expected = [['a', '2001::1', '2001::f'],
['a', '2001::11', '2001::44'],
['a', '2001::46', '2001::50'],
['b', '2001::100', '2001::110'],
['b', '2001::112', '2001::1ff'],
['b', '2001::201', '2001::ffff:ffff:ffff:f2ef'],
['b', '2001::ffff:ffff:ffff:f2f1',
'2001::ffff:ffff:ffff:ff0f'],
['b', '2001::ffff:ffff:ffff:ff11',
'2001::ffff:ffff:ffff:fffe']]
self._validate_rebuild_availability_ranges(pools, allocations,
expected)
def _test__allocate_ips_for_port(self, subnets, port, expected):
plugin = db_base_plugin_v2.NeutronDbPluginV2()
with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
'_get_subnets') as get_subnets:
with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2,
'_check_unique_ip') as check_unique:
context = mock.Mock()
get_subnets.return_value = subnets
check_unique.return_value = True
actual = plugin._allocate_ips_for_port(context, port)
self.assertEqual(expected, actual)
def test__allocate_ips_for_port_2_slaac_subnets(self):
subnets = [
{
'cidr': u'2001:100::/64',
'enable_dhcp': True,
'gateway_ip': u'2001:100::1',
'id': u'd1a28edd-bd83-480a-bd40-93d036c89f13',
'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
'ip_version': 6,
'ipv6_address_mode': None,
'ipv6_ra_mode': u'slaac'},
{
'cidr': u'2001:200::/64',
'enable_dhcp': True,
'gateway_ip': u'2001:200::1',
'id': u'dc813d3d-ed66-4184-8570-7325c8195e28',
'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
'ip_version': 6,
'ipv6_address_mode': None,
'ipv6_ra_mode': u'slaac'}]
port = {'port': {
'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176',
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'mac_address': '12:34:56:78:44:ab',
'device_owner': 'compute'}}
expected = []
for subnet in subnets:
addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(
subnet['cidr'], port['port']['mac_address']))
expected.append({'ip_address': addr, 'subnet_id': subnet['id']})
self._test__allocate_ips_for_port(subnets, port, expected)
class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase,
testlib_api.SqlTestCase):
"""Tests for NeutronDbPluginV2 as Mixin.
    While NeutronDbPluginV2TestCase checks NeutronDbPlugin and all plugins as
    a complete plugin, this test case verifies the abilities of NeutronDbPlugin
    that are provided to other plugins (e.g. DB operations). This test case
    may include tests that apply only to NeutronDbPlugin, so it should not be
    used in unit tests for other plugins.
"""
def setUp(self):
super(NeutronDbPluginV2AsMixinTestCase, self).setUp()
self.plugin = importutils.import_object(DB_PLUGIN_KLASS)
self.context = context.get_admin_context()
self.net_data = {'network': {'id': 'fake-id',
'name': 'net1',
'admin_state_up': True,
'tenant_id': 'test-tenant',
'shared': False}}
def test_create_network_with_default_status(self):
net = self.plugin.create_network(self.context, self.net_data)
default_net_create_status = 'ACTIVE'
expected = [('id', 'fake-id'), ('name', 'net1'),
('admin_state_up', True), ('tenant_id', 'test-tenant'),
('shared', False), ('status', default_net_create_status)]
for k, v in expected:
self.assertEqual(net[k], v)
def test_create_network_with_status_BUILD(self):
self.net_data['network']['status'] = 'BUILD'
net = self.plugin.create_network(self.context, self.net_data)
self.assertEqual(net['status'], 'BUILD')
def test_get_user_allocation_for_dhcp_port_returns_none(self):
plugin = manager.NeutronManager.get_plugin()
with self.network() as net, self.network() as net1:
with self.subnet(network=net, cidr='10.0.0.0/24') as subnet,\
self.subnet(network=net1, cidr='10.0.1.0/24') as subnet1:
with self.port(subnet=subnet, device_owner='network:dhcp'),\
self.port(subnet=subnet1):
# check that user allocations on another network don't
# affect _subnet_get_user_allocation method
res = plugin._subnet_get_user_allocation(
context.get_admin_context(),
subnet['subnet']['id'])
self.assertIsNone(res)
def test__validate_network_subnetpools(self):
network = models_v2.Network()
network.subnets = [models_v2.Subnet(subnetpool_id='test_id',
ip_version=4)]
new_subnetpool_id = None
self.assertRaises(n_exc.NetworkSubnetPoolAffinityError,
self.plugin._validate_network_subnetpools,
network, new_subnetpool_id, 4)
class TestNetworks(testlib_api.SqlTestCase):
def setUp(self):
super(TestNetworks, self).setUp()
self._tenant_id = 'test-tenant'
# Update the plugin
self.setup_coreplugin(DB_PLUGIN_KLASS)
def _create_network(self, plugin, ctx, shared=True):
network = {'network': {'name': 'net',
'shared': shared,
'admin_state_up': True,
'tenant_id': self._tenant_id}}
created_network = plugin.create_network(ctx, network)
return (network, created_network['id'])
def _create_port(self, plugin, ctx, net_id, device_owner, tenant_id):
port = {'port': {'name': 'port',
'network_id': net_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'admin_state_up': True,
'device_id': 'device_id',
'device_owner': device_owner,
'tenant_id': tenant_id}}
plugin.create_port(ctx, port)
def _test_update_shared_net_used(self,
device_owner,
expected_exception=None):
plugin = manager.NeutronManager.get_plugin()
ctx = context.get_admin_context()
network, net_id = self._create_network(plugin, ctx)
self._create_port(plugin,
ctx,
net_id,
device_owner,
self._tenant_id + '1')
network['network']['shared'] = False
if (expected_exception):
with testlib_api.ExpectedException(expected_exception):
plugin.update_network(ctx, net_id, network)
else:
plugin.update_network(ctx, net_id, network)
def test_update_shared_net_used_fails(self):
self._test_update_shared_net_used('', n_exc.InvalidSharedSetting)
def test_update_shared_net_used_as_router_gateway(self):
self._test_update_shared_net_used(
constants.DEVICE_OWNER_ROUTER_GW)
def test_update_shared_net_used_by_floating_ip(self):
self._test_update_shared_net_used(
constants.DEVICE_OWNER_FLOATINGIP)
| apache-2.0 |
rgarcia-herrera/bici-tren | abm/flock_sim.py | 1 | 3274 | from time import sleep
# from datetime import datetime
import bike_agent as model
from router import refine, Router
import argparse
import random
parser = argparse.ArgumentParser(description='agent-based bike flocking simulation')
parser.add_argument('--init', type=int, default=50,
help="set to 0 to resume run")
args = parser.parse_args()
model.connect('mydb')
if args.init > 0:
# drop all bikes
for b in model.Bike.objects.all():
b.delete()
h = 0.0
# init source, target, speed for this many bikes
for n in range(args.init):
b = model.Bike()
b.speed = 20
b.random_ride(ne_lat=19.461332069967366,
ne_lng=-99.09204483032227,
sw_lat=19.40467336236742,
sw_lng=-99.17787551879884,
min_len=8, max_len=10)
b.route = refine(b.route, b.speed)
b.save()
sleep(0.1)
del(b)
def status_count():
solo = float(model.Bike.objects(status="solo").count())
in_flock = 0
for b in model.Bike.objects.all():
if b.in_flock(100):
in_flock += 1
# flocking = float(model.Bike.objects(status="flocking").count())
all = float(model.Bike.objects.count())
return {'solo': "%0.2f" % (solo / all),
'in': "%0.2f" % (in_flock / all)}
def mean_distance():
return sum([b.distance_to(b.destination)
for b in model.Bike.objects.all()]) \
/ model.Bike.objects.count()
t = 0
while model.Bike.objects.count() > 0:
print t # , model.Bike.objects(status='flocking').count()
t += 1
all_bikes = [b for b in model.Bike.objects.all()]
random.shuffle(all_bikes)
for b in all_bikes:
if b.got_there():
b.delete()
else:
b.step()
b.reload()
# route update
# if solo seek a flock
if b.status == "solo":
candidates = [c for c
in b.get_flock_candidates(
b.distance_to(b.destination)/2.0,
b.distance_to(b.destination)/3.0)]
if len(candidates) < 2:
                    continue  # no flock? continue solo
flock = model.Flock(candidates)
if abs(b.heading - b.heading_to(flock.centroid)) < 0.5:
router = Router(
points=[b.point['coordinates'],
flock.centroid,
b.destination['coordinates']])
b.route = refine(router.route, b.speed)
b.status = 'flocking'
b.speed = 30
b.save()
else:
# if flocking, check if my heading is too far
# from dest_heading
if abs(b.heading
- b.heading_to(b.destination['coordinates'])) > 0.8:
router = Router(points=[b.point['coordinates'],
b.destination['coordinates']])
b.route = refine(router.route, b.speed)
b.status = "solo"
b.speed = 20
b.save()
# print status_count(), mean_distance()
| gpl-3.0 |
ClearCorp-dev/odoo-clearcorp | TODO-7.0/product_cost_group/__openerp__.py | 4 | 1780 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : 'CLEARCORP Product Cost Group',
"version" : '2.0',
"author" : 'CLEARCORP S.A.',
#easy, normal, expert
'complexity': 'normal',
"description": """
    Creates a group to show/hide product costs and customizes product views
""",
"category": 'Sales',
"sequence": 4,
"website" : "http://clearcorp.co.cr",
"images" : [],
"icon" : False,
"depends" : [
'base',
'stock'],
"init_xml" : [],
"demo_xml" : [],
"update_xml" : ['product_cost_group_view.xml',
'security/product_cost_group_security.xml',
],
"test" : [],
"auto_install": False,
"application": False,
"installable": True,
'license': 'AGPL-3',
}
| agpl-3.0 |
jtyr/ansible | test/lib/ansible_test/_internal/ci/shippable.py | 25 | 10040 | """Support code for working with Shippable."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
from .. import types as t
from ..config import (
CommonConfig,
TestConfig,
)
from ..git import (
Git,
)
from ..http import (
HttpClient,
urlencode,
)
from ..util import (
ApplicationError,
display,
MissingEnvironmentVariable,
SubprocessError,
)
from . import (
AuthContext,
ChangeDetectionNotSupported,
CIProvider,
OpenSSLAuthHelper,
)
CODE = 'shippable'
class Shippable(CIProvider):
"""CI provider implementation for Shippable."""
def __init__(self):
self.auth = ShippableAuthHelper()
@staticmethod
def is_supported(): # type: () -> bool
"""Return True if this provider is supported in the current running environment."""
return os.environ.get('SHIPPABLE') == 'true'
@property
def code(self): # type: () -> str
"""Return a unique code representing this provider."""
return CODE
@property
def name(self): # type: () -> str
"""Return descriptive name for this provider."""
return 'Shippable'
def generate_resource_prefix(self): # type: () -> str
"""Return a resource prefix specific to this CI provider."""
try:
prefix = 'shippable-%s-%s' % (
os.environ['SHIPPABLE_BUILD_NUMBER'],
os.environ['SHIPPABLE_JOB_NUMBER'],
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
return prefix
def get_base_branch(self): # type: () -> str
"""Return the base branch or an empty string."""
base_branch = os.environ.get('BASE_BRANCH')
if base_branch:
base_branch = 'origin/%s' % base_branch
return base_branch or ''
def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
"""Initialize change detection."""
result = ShippableChanges(args)
if result.is_pr:
job_type = 'pull request'
elif result.is_tag:
job_type = 'tag'
else:
job_type = 'merge commit'
display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
if result.paths is None:
# There are several likely causes of this:
# - First run on a new branch.
# - Too many pull requests passed since the last merge run passed.
display.warning('No successful commit found. All tests will be executed.')
return result.paths
def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
"""Return True if Ansible Core CI is supported."""
return True
def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
"""Return authentication details for Ansible Core CI."""
try:
request = dict(
run_id=os.environ['SHIPPABLE_BUILD_ID'],
job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
self.auth.sign_request(request)
auth = dict(
shippable=request,
)
return auth
def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
"""Return details about git in the current environment."""
commit = os.environ.get('COMMIT')
base_commit = os.environ.get('BASE_COMMIT')
details = dict(
base_commit=base_commit,
commit=commit,
merged_commit=self._get_merged_commit(args, commit),
)
return details
# noinspection PyUnusedLocal
def _get_merged_commit(self, args, commit): # type: (CommonConfig, str) -> t.Optional[str] # pylint: disable=unused-argument
"""Find the merged commit that should be present."""
if not commit:
return None
git = Git()
try:
show_commit = git.run_git(['show', '--no-patch', '--no-abbrev', commit])
except SubprocessError as ex:
# This should only fail for pull requests where the commit does not exist.
# Merge runs would fail much earlier when attempting to checkout the commit.
raise ApplicationError('Commit %s was not found:\n\n%s\n\n'
'GitHub may not have fully replicated the commit across their infrastructure.\n'
'It is also possible the commit was removed by a force push between job creation and execution.\n'
'Find the latest run for the pull request and restart failed jobs as needed.'
% (commit, ex.stderr.strip()))
head_commit = git.run_git(['show', '--no-patch', '--no-abbrev', 'HEAD'])
if show_commit == head_commit:
# Commit is HEAD, so this is not a pull request or the base branch for the pull request is up-to-date.
return None
match_merge = re.search(r'^Merge: (?P<parents>[0-9a-f]{40} [0-9a-f]{40})$', head_commit, flags=re.MULTILINE)
if not match_merge:
# The most likely scenarios resulting in a failure here are:
# A new run should or does supersede this job, but it wasn't cancelled in time.
# A job was superseded and then later restarted.
raise ApplicationError('HEAD is not commit %s or a merge commit:\n\n%s\n\n'
'This job has likely been superseded by another run due to additional commits being pushed.\n'
'Find the latest run for the pull request and restart failed jobs as needed.'
% (commit, head_commit.strip()))
parents = set(match_merge.group('parents').split(' '))
if len(parents) != 2:
raise ApplicationError('HEAD is a %d-way octopus merge.' % len(parents))
if commit not in parents:
raise ApplicationError('Commit %s is not a parent of HEAD.' % commit)
parents.remove(commit)
last_commit = parents.pop()
return last_commit
class ShippableAuthHelper(OpenSSLAuthHelper):
"""
Authentication helper for Shippable.
Based on OpenSSL since cryptography is not provided by the default Shippable environment.
"""
def publish_public_key(self, public_key_pem): # type: (str) -> None
"""Publish the given public key."""
# display the public key as a single line to avoid mangling such as when prefixing each line with a timestamp
display.info(public_key_pem.replace('\n', ' '))
# allow time for logs to become available to reduce repeated API calls
time.sleep(3)
class ShippableChanges:
"""Change information for Shippable build."""
def __init__(self, args): # type: (TestConfig) -> None
self.args = args
self.git = Git()
try:
self.branch = os.environ['BRANCH']
self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
self.commit = os.environ['COMMIT']
self.project_id = os.environ['PROJECT_ID']
self.commit_range = os.environ['SHIPPABLE_COMMIT_RANGE']
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
if self.is_tag:
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
if self.is_pr:
self.paths = sorted(self.git.get_diff_names([self.commit_range]))
self.diff = self.git.get_diff([self.commit_range])
else:
commits = self.get_successful_merge_run_commits(self.project_id, self.branch)
last_successful_commit = self.get_last_successful_commit(commits)
if last_successful_commit:
self.paths = sorted(self.git.get_diff_names([last_successful_commit, self.commit]))
self.diff = self.git.get_diff([last_successful_commit, self.commit])
else:
# first run for branch
self.paths = None # act as though change detection not enabled, do not filter targets
self.diff = []
def get_successful_merge_run_commits(self, project_id, branch): # type: (str, str) -> t.Set[str]
"""Return a set of recent successsful merge commits from Shippable for the given project and branch."""
parameters = dict(
isPullRequest='false',
projectIds=project_id,
branch=branch,
)
url = 'https://api.shippable.com/runs?%s' % urlencode(parameters)
http = HttpClient(self.args, always=True)
response = http.get(url)
result = response.json()
if 'id' in result and result['id'] == 4004:
# most likely due to a private project, which returns an HTTP 200 response with JSON
display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
return set()
commits = set(run['commitSha'] for run in result if run['statusCode'] == 30)
return commits
def get_last_successful_commit(self, successful_commits): # type: (t.Set[str]) -> t.Optional[str]
"""Return the last successful commit from git history that is found in the given commit list, or None."""
commit_history = self.git.get_rev_list(max_count=100)
ordered_successful_commits = [commit for commit in commit_history if commit in successful_commits]
last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
return last_successful_commit
| gpl-3.0 |
rafaeltomesouza/frontend-class1 | aula2/a15/linkedin/client/.gradle/yarn/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 1366 | 120842 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
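As a rough orientation, a heavily trimmed project.pbxproj might look something
like the following. The identifier and every value here are invented purely
for illustration; a real file produced by Xcode or by this module contains
many more objects and properties:
  // !$*UTF8*$!
  {
    archiveVersion = 1;
    objectVersion = 46;
    objects = {
      0123456789ABCDEF01234567 /* Project object */ = {
        isa = PBXProject;
        /* ... other PBXProject properties ... */
      };
      /* ... more objects, one entry per 24-character identifier ... */
    };
    rootObject = 0123456789ABCDEF01234567 /* Project object */;
  }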
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
object as a "weak" reference: it does not "own" the PBXBuildFile, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
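Because identifiers are derived from a hash of each object's class, name, and
ancestry (as described above), recomputing them over an unchanged tree is
stable. A minimal sketch, reusing the objects built in the example above:
  project_file.ComputeIDs()
  first_id = project.id
  project_file.ComputeIDs()
  assert first_id == project.id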
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
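  """Converts "$(VAR)" references in input_string to shell "${VAR}" syntax."""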
return re.sub(r'\$\((.*?)\)', '${\\1}', input_string)
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
             default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, UpdateIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
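  As an invented illustration (no such class exists in this module), a subclass
  with a required string 'name' and a strongly-owned, required list of child
  objects might declare its schema as:
    _schema = XCObject._schema.copy()
    _schema.update({
      'name':     [0, str,      0, 1],
      'children': [1, XCObject, 1, 1, []],
    })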
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError('Strong dict for key ' + key + ' in ' + \
self.__class__.__name__)
else:
that._properties[key] = value.copy()
else:
raise TypeError('Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__)
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
      their objects. By adding the length, it becomes far less likely that
ID collisions will be encountered, intentionally or not.
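      For example, without the length prefix, hashing 'AB' followed by 'C'
      would feed the hash exactly the same bytes as hashing 'A' followed by
      'BC'.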
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest
# is 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError(
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name()))
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
# by re.sub with match.group(0) containing a character matched by the
# the _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
# Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
# $ (dollar sign), . (period), and _ (underscore) is present. Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other characters within the ASCII control character range (0 through
# 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
# in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
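    # A few illustrative (non-exhaustive) mappings under these rules:
    #   abc.py -> abc.py (matches _unquoted, printed bare)
    #   two words -> "two words" (space forces quoting)
    #   say "hi" -> "say \"hi\"" (quotation marks escaped)
    #   ___x -> "___x" (matches _quoted, so quoted despite containing only
    #                   otherwise-safe characters)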
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError("Can't make " + value.__class__.__name__ + ' printable')
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
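# Illustrative output (hypothetical object IDs, not real hashes): a
# PBXBuildFile, which prints in single-line form, is written roughly as
#   \t\tDEADBEEFDEADBEEFDEADBEEF /* main.cc in Sources */ = {isa = PBXBuildFile; fileRef = FEEDFACEFEEDFACEFEEDFACE /* main.cc */; };
# whereas multi-line objects put each key-value pair on its own line,
# indented with three tabs.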
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError(property + ' not in ' + self.__class__.__name__)
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError(
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__)
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__)
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__)
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError("Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__)
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
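# Usage sketch (hypothetical property values): updates are validated against
# the class schema before assignment.
#   file_ref = PBXFileReference({'path': 'main.cc'})
#   file_ref.UpdateProperties({'fileEncoding': '4'})   # ok, schema says str
#   file_ref.UpdateProperties({'fileEncoding': 4})     # raises TypeError
#   file_ref.UpdateProperties({'bogus': 'x'})          # raises KeyError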
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError(key + ' not in ' + self.__class__.__name__)
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
if not isinstance(value, property_type):
raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__)
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError(self.__class__.__name__ + ' requires ' + property)
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
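# Sketch of the defaulting above (hypothetical paths, using the concrete
# PBXFileReference subclass defined later in this module):
#   PBXFileReference({'path': 'a/b/c.cc'})
#     -> name defaults to 'c.cc'
#   PBXFileReference({'path': '$(SDKROOT)/usr/lib'})
#     -> sourceTree becomes 'SDKROOT', path becomes 'usr/lib'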
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
a/b/f1 and the groups a and b become collapsed into a single group a/b, f1 will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
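# Sketch (hypothetical hierarchy): for a group 'a' containing a group 'b'
# containing a file reference 'c.cc', the file's FullPath() is 'a/b/c.cc'.
# If the outermost group instead had sourceTree 'SDKROOT' and no path of its
# own, the result would be '$(SDKROOT)/b/c.cc', and the walk stops there
# because the path is no longer project-relative.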
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
# It is not sufficient to rely only on name and parent to build a unique
# hashable: a node could have two child PBXGroups sharing a common name.
# To add entropy, the hashable is enhanced with the names of all of its
# children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
# To avoid a circular reference the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError('Found multiple children with path ' + child_path)
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError('Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path))
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
# etc.) are pretty much the only case where this is likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
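# Usage sketch (hypothetical paths, hierarchical=True):
#   group.AddOrGetFileByPath('a/b/main.cc', True)
#     -> nested PBXGroups 'a' and 'b' plus a PBXFileReference for main.cc
#   group.AddOrGetFileByPath('res/en.lproj/Main.nib', True)
#     -> a PBXVariantGroup named 'Main.nib' containing a variant named 'en'
#   group.AddOrGetFileByPath('frameworks/', True)
#     -> a PBXFileReference of 'folder' type for the directory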
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
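# Worked sketch (hypothetical groups, not part of the original module):
#   a = PBXGroup({'path': 'a'})
#   b = PBXGroup({'path': 'b'})
#   c = PBXGroup({'path': 'c'})
#   a.AppendChild(b); b.AppendChild(c)
#   c.AppendChild(PBXFileReference({'path': 'f1.cc'}))
#   a.TakeOverOnlyChild()
#   # a now has path 'a/b/c' and holds the f1.cc reference directly.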
def SortGroup(self):
self._properties['children'] = \
sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'kext': 'wrapper.kext',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'swift': 'sourcecode.swift',
'ttf': 'file',
'xcassets': 'folder.assetcatalog',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
'xcdatamodeld': 'wrapper.xcdatamodeld',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
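# Sketch of the file-typing above (hypothetical paths):
#   PBXFileReference({'path': 'foo.cc'})     # lastKnownFileType 'sourcecode.cpp.cpp'
#   PBXFileReference({'path': 'rules.gyp'})  # explicitFileType 'sourcecode'
#   PBXFileReference({'path': 'Headers/'})   # directory; lastKnownFileType 'folder'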
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError(name)
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
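# Sketch of the tri-state result (hypothetical settings, using the default
# Debug/Release configuration pair):
#   xcl = XCConfigurationList()
#   xcl.SetBuildSetting('ARCHS', 'x86_64')
#   xcl.HasBuildSetting('ARCHS')        # -> 1 (present and equal everywhere)
#   xcl.ConfigurationNamed('Debug').SetBuildSetting('ONLY_DEBUG', 'YES')
#   xcl.HasBuildSetting('ONLY_DEBUG')   # -> -1 (present in only some children)
#   xcl.HasBuildSetting('UNSET')        # -> 0 (present nowhere)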
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError('Variant values for ' + key)
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path of a child in the files list by
path (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError(
self.__class__.__name__ + ' must implement FileGroup')
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError('Found multiple build files with path ' + path)
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
are already present in the phase, referenced by a different PBXBuildFile
object, raises an exception. This does not raise an exception when
a PBXFileReference or PBXVariantGroup reappear and are referenced by the
same PBXBuildFile that has already introduced them, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError('Found multiple build files for ' + \
xcfilelikeelement.Name())
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
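# Usage sketch (hypothetical phase and paths): for a concrete phase such as a
# PBXSourcesBuildPhase attached to a target within a PBXProject,
#   phase.AddFile('src/main.cc')
# resolves the file through FileGroup/AddOrGetFileByPath and appends a new
# PBXBuildFile, while adding 'res/fr.lproj/Main.nib' after
# 'res/en.lproj/Main.nib' reuses the variant group's existing PBXBuildFile
# and only records the additional variant path.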
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_FRAMEWORKS_DIR': 10, # Frameworks Directory
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
# : 6, # Executables: 6
# : 7, # Resources
# : 15, # Java Resources
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
# Everything else needs to be relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError('Can\'t use path %s in a %s' % \
(path, self.__class__.__name__))
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
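# Sketch (hypothetical destinations):
#   phase.SetDestination('$(BUILT_PRODUCTS_DIR)/PlugIns')
#     -> dstSubfolderSpec 16, dstPath 'PlugIns'
#   phase.SetDestination('$(SRCROOT)/out')
#     -> dstSubfolderSpec 0, dstPath '$(SRCROOT)/out' (unrecognized variable)
#   phase.SetDestination('/usr/local/lib')
#     -> dstSubfolderSpec 0, dstPath 'usr/local/lib' (absolute path)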
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
# products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
# An XCTarget is really just an XCObject; the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXFileReference object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
# suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.application.watchapp': ['wrapper.application',
'', '.app'],
'com.apple.product-type.watchkit-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.app-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
'com.apple.product-type.kernel-extension': ['wrapper.kext',
'', '.kext'],
}
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
        if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
# Extension override.
suffix = '.' + force_extension
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
          # Xcode handles most prefixes based on the target type; however,
          # there are exceptions. If a "BSD Dynamic Library" target is added
          # in the Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check
          # duplicates that behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project
        # file, but phases like PBXSourcesBuildPhase may only be present
        # singly, and this function is only expected to return a single phase
        # of the requested type. Loop over the entire list of phases and
        # assert if more than one of the desired type is found.
assert the_phase is None
the_phase = phase
return the_phase
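  # Hedged usage sketch (the variable name is assumed for illustration): given
  # a PBXNativeTarget instance `target`,
  #   sources_phase = target.GetBuildPhaseByType(PBXSourcesBuildPhase)
  # returns the single sources phase if one exists, or None otherwise. The
  # convenience accessors below (HeadersPhase, ResourcesPhase, SourcesPhase,
  # FrameworksPhase) build on this and create the phase on demand.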
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase is None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase is None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase is None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
  # A PBXProject is really just an XCObject; the XCContainerPortal thing is
  # just to allow PBXProject to be used in the containerPortal property of
  # PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
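  # Rough example of RootGroupForPath's heuristic (the paths are hypothetical):
  # a generated file such as '$(INTERMEDIATE_DIR)/generated.cc' would resolve
  # to (self.IntermediatesGroup(), True), while an ordinary source path like
  # 'src/foo.cc' carries no recognized source tree and falls through to
  # (self.SourceGroup(), True).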
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
    # Sort everything else by putting groups before files, and going
    # alphabetically by name within sections of groups and files. SortGroup
    # is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
if posixpath.isabs(projectDirPath[0]):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
# Xcode seems to sort this list case-insensitively
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
      # The link already exists. Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
targets = other_pbxproject.GetProperty('targets')
if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
dir_path = project_ref._properties['path']
product_group._hashables.extend(dir_path)
return [product_group, project_ref]
def _AllSymrootsUnique(self, target, inherit_unique_symroot):
# Returns True if all configurations have a unique 'SYMROOT' attribute.
    # The value of inherit_unique_symroot decides whether a configuration is
    # assumed to inherit a unique 'SYMROOT' attribute from its parent when it
    # doesn't define an explicit value for 'SYMROOT'.
symroots = self._DefinedSymroots(target)
for s in self._DefinedSymroots(target):
if (s is not None and not self._IsUniqueSymrootForTarget(s) or
s is None and not inherit_unique_symroot):
return False
return True if symroots else inherit_unique_symroot
def _DefinedSymroots(self, target):
    # Returns the set of values defined for the 'SYMROOT' attribute across all
    # configurations of this target. If any configuration doesn't define
    # 'SYMROOT', None is added to the returned set. If no configuration
    # defines 'SYMROOT' at all, an empty set is returned.
config_list = target.GetProperty('buildConfigurationList')
symroots = set()
for config in config_list.GetProperty('buildConfigurations'):
setting = config.GetProperty('buildSettings')
if 'SYMROOT' in setting:
symroots.add(setting['SYMROOT'])
else:
symroots.add(None)
if len(symroots) == 1 and None in symroots:
return set()
return symroots
def _IsUniqueSymrootForTarget(self, symroot):
    # Returns True if the given 'SYMROOT' value is considered unique for a
    # target. A value is unique if the Xcode macro '$SRCROOT' appears in it in
    # any form ('$SRCROOT' or '$(SRCROOT)').
uniquifier = ['$SRCROOT', '$(SRCROOT)']
if any(x in symroot for x in uniquifier):
return True
return False
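  # For example (values assumed for illustration), _IsUniqueSymrootForTarget
  # above would treat '$(SRCROOT)/build' and '$SRCROOT/out' as unique, but not
  # a fixed path such as '/tmp/build', which contains neither '$SRCROOT' nor
  # '$(SRCROOT)'.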
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) is None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
      # x and y are PBXReferenceProxy objects. Go through their associated
      # PBXContainerItemProxy to get the remote PBXFileReference, which will
      # be present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 46],
'rootObject': [0, PBXProject, 1, 1],
})
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
    # separately during printing. This structure allows a fairly standard
    # loop to do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
| mit |
intermezzo-fr/hillary-clinton-emails | scripts/outputCsvs.py | 5 | 3577 | import numpy as np
import pandas as pd
def normalize_address(raw_address):
for c in ["'", ",", "°", "•", "`", '"', "‘", "-"]:
raw_address = raw_address.replace(c, "")
raw_address = raw_address.lower()
if "<" in raw_address:
prefix = raw_address[:raw_address.index("<")].strip()
if prefix:
return prefix
return raw_address.strip()
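# Informal examples of normalize_address (the addresses are made up and only
# meant to illustrate the rules above):
#   normalize_address('H <[email protected]>')   -> 'h'
#   normalize_address('Abedin, Huma')          -> 'abedin huma'
#   normalize_address('[email protected]')  -> '[email protected]'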
emails = pd.read_csv("input/emailsNoId.csv")
emails["MetadataTo"].replace(np.nan, "", inplace=True)
emails["ExtractedTo"].replace(np.nan, "", inplace=True)
emails["MetadataFrom"].replace(np.nan, "", inplace=True)
emails["ExtractedFrom"].replace(np.nan, "", inplace=True)
emails.sort(columns=["DocNumber"], inplace=True)
emails.insert(0, "Id", list(range(1, len(emails)+1)))
emails.insert(5, "SenderPersonId", np.nan)
alias_person = pd.read_csv("versionedInput/alias_person.csv")
alias_person["AliasName"] = [normalize_address(alias) for alias in alias_person["AliasName"]]
persons = pd.DataFrame(columns=["Id", "Name"])
aliases = pd.DataFrame(columns=["Id", "Alias", "PersonId"])
email_receivers = pd.DataFrame(columns=["Id", "EmailId", "PersonId"]).astype(int)
def add_alias(aliases, persons, alias_name, person_name):
if len(np.where(aliases["Alias"]==alias_name)[0])>0:
return
locs = np.where(persons["Name"]==person_name)[0]
if len(locs)>0:
person_id = persons["Id"][locs[0]]
else:
person_id = len(persons)+1
persons.loc[person_id-1] = [person_id, person_name]
alias_id = len(aliases)+1
aliases.loc[alias_id-1] = [alias_id, alias_name.lower(), person_id]
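# Sketch of how add_alias is used below (the names are illustrative, not taken
# from the data): a row with AliasName 'hrod17' and PersonName
# 'Hillary Clinton' either reuses the existing person id for 'Hillary Clinton'
# or appends a new row to `persons`, and then records the lowercased alias in
# `aliases` pointing at that person id; if the alias is already present,
# nothing is added.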
for (i, alias_person) in alias_person.iterrows():
add_alias(aliases, persons, alias_person["AliasName"], alias_person["PersonName"])
log = open("working/outputCsvsLog.txt", "w")
for (i, email) in emails.iterrows():
from_person_id = None
from_address = normalize_address(email["MetadataFrom"].split(";")[0])
if from_address != "":
locs = np.where(aliases["Alias"]==from_address)[0]
if len(locs)==0:
add_alias(aliases, persons, from_address, from_address)
log.write("Added From Person: %s\n" % from_address)
loc = np.where(aliases["Alias"]==from_address)[0][0]
from_person_id = aliases["PersonId"][loc]
from_person_name = persons["Name"][from_person_id-1]
emails.loc[i, "SenderPersonId"] = from_person_id
if email["ExtractedFrom"] != "":
add_alias(aliases, persons, normalize_address(email["ExtractedFrom"]), from_person_name)
to_addresses = email["MetadataTo"].split(";") + email["ExtractedTo"].split(";")
to_addresses = sorted(list(set([normalize_address(x) for x in to_addresses])))
if "" in to_addresses:
to_addresses.remove("")
for to_address in to_addresses:
locs = np.where(aliases["Alias"]==to_address)[0]
if len(locs)==0:
add_alias(aliases, persons, to_address, to_address)
log.write("Added To Person: %s\n" % to_address)
loc = np.where(aliases["Alias"]==to_address)[0][0]
# don't add a receiver if they were also the sender
if from_person_id != aliases["PersonId"][loc]:
email_receivers.loc[len(email_receivers)] = [len(email_receivers)+1, email["Id"], aliases["PersonId"][loc]]
persons.to_csv("output/Persons.csv", index=False)
aliases.to_csv("output/Aliases.csv", index=False)
emails.to_csv("output/Emails.csv", index=False, float_format="%0.0f")
email_receivers.to_csv("output/EmailReceivers.csv", index=False, float_format="%0.0f")
log.close() | mit |
magacoin/magacoin | contrib/devtools/optimize-pngs.py | 51 | 3392 | #!/usr/bin/env python
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text
'''
import os
import sys
import subprocess
import hashlib
from PIL import Image
def file_hash(filename):
'''Return hash of raw file contents'''
with open(filename, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def content_hash(filename):
'''Return hash of RGBA contents of image'''
i = Image.open(filename)
i = i.convert('RGBA')
data = i.tobytes()
return hashlib.sha256(data).hexdigest()
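# The two hashes above serve different purposes: file_hash changes whenever
# any byte of the file changes (it shows that pngcrush rewrote the file),
# while content_hash should be identical before and after crushing, because
# only the encoding/metadata may change, not the decoded RGBA pixels. A
# minimal sketch of that invariant (the file name is assumed):
#   before = content_hash('icon.png')
#   ... run pngcrush over icon.png ...
#   assert content_hash('icon.png') == before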
pngcrush = 'pngcrush'
git = 'git'
folders = ["src/qt/res/movies", "src/qt/res/icons", "share/pixmaps"]
basePath = subprocess.check_output([git, 'rev-parse', '--show-toplevel']).rstrip('\n')
totalSaveBytes = 0
noHashChange = True
outputArray = []
for folder in folders:
absFolder=os.path.join(basePath, folder)
for file in os.listdir(absFolder):
extension = os.path.splitext(file)[1]
if extension.lower() == '.png':
print("optimizing "+file+"..."),
file_path = os.path.join(absFolder, file)
fileMetaMap = {'file' : file, 'osize': os.path.getsize(file_path), 'sha256Old' : file_hash(file_path)};
fileMetaMap['contentHashPre'] = content_hash(file_path)
pngCrushOutput = ""
try:
pngCrushOutput = subprocess.check_output(
[pngcrush, "-brute", "-ow", "-rem", "gAMA", "-rem", "cHRM", "-rem", "iCCP", "-rem", "sRGB", "-rem", "alla", "-rem", "text", file_path],
stderr=subprocess.STDOUT).rstrip('\n')
except:
print "pngcrush is not installed, aborting..."
sys.exit(0)
#verify
if "Not a PNG file" in subprocess.check_output([pngcrush, "-n", "-v", file_path], stderr=subprocess.STDOUT):
print "PNG file "+file+" is corrupted after crushing, check out pngcursh version"
sys.exit(1)
fileMetaMap['sha256New'] = file_hash(file_path)
fileMetaMap['contentHashPost'] = content_hash(file_path)
if fileMetaMap['contentHashPre'] != fileMetaMap['contentHashPost']:
print "Image contents of PNG file "+file+" before and after crushing don't match"
sys.exit(1)
fileMetaMap['psize'] = os.path.getsize(file_path)
outputArray.append(fileMetaMap)
print("done\n"),
print "summary:\n+++++++++++++++++"
for fileDict in outputArray:
oldHash = fileDict['sha256Old']
newHash = fileDict['sha256New']
totalSaveBytes += fileDict['osize'] - fileDict['psize']
noHashChange = noHashChange and (oldHash == newHash)
print fileDict['file']+"\n size diff from: "+str(fileDict['osize'])+" to: "+str(fileDict['psize'])+"\n old sha256: "+oldHash+"\n new sha256: "+newHash+"\n"
print "completed. Checksum stable: "+str(noHashChange)+". Total reduction: "+str(totalSaveBytes)+" bytes"
| mit |
gppezzi/easybuild-framework | easybuild/toolchains/linalg/blis.py | 2 | 1479 | ##
# Copyright 2013-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for BLIS as toolchain linear algebra library.
:author: Kenneth Hoste (Ghent University)
:author: Bart Oldeman (McGill University, Calcul Quebec, Compute Canada)
"""
from easybuild.tools.toolchain.linalg import LinAlg
TC_CONSTANT_BLIS = 'BLIS'
class Blis(LinAlg):
"""
Trivial class, provides BLIS support.
"""
BLAS_MODULE_NAME = ['BLIS']
BLAS_LIB = ['blis']
BLAS_FAMILY = TC_CONSTANT_BLIS
| gpl-2.0 |
lizardsystem/lizard-workspace | lizard_workspace/migrations/0019_auto__add_field_wmsserver_js_popup_class.py | 1 | 18569 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'WmsServer.js_popup_class'
db.add_column('lizard_workspace_wmsserver', 'js_popup_class', self.gf('django.db.models.fields.CharField')(max_length=80, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'WmsServer.js_popup_class'
db.delete_column('lizard_workspace_wmsserver', 'js_popup_class')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_map.backgroundmap': {
'Meta': {'ordering': "('index',)", 'object_name': 'BackgroundMap'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'google_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'is_base_layer': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'layer_names': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'layer_type': ('django.db.models.fields.IntegerField', [], {}),
'layer_url': ('django.db.models.fields.CharField', [], {'default': "'http://tile.openstreetmap.nl/tiles/${z}/${x}/${y}.png'", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'lizard_map.workspacestorage': {
'Meta': {'object_name': 'WorkspaceStorage'},
'absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'background_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_map.BackgroundMap']", 'null': 'True', 'blank': 'True'}),
'custom_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dt_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dt_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'td': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'td_end': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'td_start': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'x_max': ('django.db.models.fields.FloatField', [], {'default': '1254790'}),
'x_min': ('django.db.models.fields.FloatField', [], {'default': '-14675'}),
'y_max': ('django.db.models.fields.FloatField', [], {'default': '6964942'}),
'y_min': ('django.db.models.fields.FloatField', [], {'default': '6668977'})
},
'lizard_security.dataset': {
'Meta': {'ordering': "['name']", 'object_name': 'DataSet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'})
},
'lizard_workspace.app': {
'Meta': {'object_name': 'App'},
'action_params': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'action_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'appscreen': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['lizard_workspace.AppScreen']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.AppIcons']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mouse_over': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'root_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.LayerFolder']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.appicons': {
'Meta': {'object_name': 'AppIcons'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'lizard_workspace.appscreen': {
'Meta': {'object_name': 'AppScreen'},
'apps': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'screen'", 'symmetrical': 'False', 'through': "orm['lizard_workspace.AppScreenAppItems']", 'to': "orm['lizard_workspace.App']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.appscreenappitems': {
'Meta': {'object_name': 'AppScreenAppItems'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.App']"}),
'appscreen': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.AppScreen']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'})
},
'lizard_workspace.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.layer': {
'Meta': {'object_name': 'Layer'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.Category']", 'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'filter': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_base_layer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_clickable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_server': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'js_popup_class': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'layers': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'location_filter': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'ollayer_class': ('django.db.models.fields.CharField', [], {'default': "'OpenLayers.Layer.WMS'", 'max_length': '80'}),
'options': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'owner_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'request_params': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'null': 'True', 'blank': 'True'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.WmsServer']", 'null': 'True', 'blank': 'True'}),
'single_tile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200', 'db_index': 'True'}),
'source_ident': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_workspace.Tag']", 'null': 'True', 'blank': 'True'}),
'use_location_filter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'lizard_workspace.layerfolder': {
'Meta': {'object_name': 'LayerFolder'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'layer_tag': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_workspace.Tag']", 'null': 'True', 'blank': 'True'}),
'layers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_workspace.Layer']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children_set'", 'null': 'True', 'to': "orm['lizard_workspace.LayerFolder']"})
},
'lizard_workspace.layerworkspace': {
'Meta': {'ordering': "['name']", 'object_name': 'LayerWorkspace', '_ormbases': ['lizard_map.WorkspaceStorage']},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.Category']", 'null': 'True', 'blank': 'True'}),
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'layers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_workspace.Layer']", 'null': 'True', 'through': "orm['lizard_workspace.LayerWorkspaceItem']", 'blank': 'True'}),
'owner_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'personal_category': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'workspacestorage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_map.WorkspaceStorage']", 'unique': 'True', 'primary_key': 'True'})
},
'lizard_workspace.layerworkspaceitem': {
'Meta': {'object_name': 'LayerWorkspaceItem'},
'clickable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'filter_string': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {'default': '100'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.Layer']"}),
'layer_workspace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.LayerWorkspace']"}),
'opacity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'lizard_workspace.synctask': {
'Meta': {'object_name': 'SyncTask'},
'data_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataSet']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_result': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'server': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.WmsServer']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_workspace.Tag']", 'null': 'True', 'blank': 'True'})
},
'lizard_workspace.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.thematicmap': {
'Meta': {'object_name': 'ThematicMap'},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'lizard_workspace.wmsserver': {
'Meta': {'object_name': 'WmsServer'},
'abstract': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_clickable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_server': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'js_popup_class': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_workspace']
| gpl-3.0 |
t-amerssonis/okami | src/Okami/third-parts/gitpy/files.py | 122 | 1831 | # Copyright (c) 2009, Rotem Yaari <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ModifiedFile(object):
def __init__(self, filename):
super(ModifiedFile, self).__init__()
self.filename = filename
def __repr__(self):
return self.filename
def __eq__(self, other):
return isinstance(other, ModifiedFile) and other.filename == self.filename
| mit |
sonofmun/DissProject | unit_tests/test_sem_extract_pipeline.py | 1 | 6474 | from unittest import TestCase
from Data_Production.sem_extract_pipeline import SemPipeline, ParamTester
from collections import Counter
import re
import os
import shutil
import numpy as np
__author__ = 'matt'
class TestInit(TestCase):
def test_sim_algo_reset(self):
""" Tests whether an incorrect value for sim_algo is changed to 'cosine'"""
pipe = SemPipeline(sim_algo='default')
self.assertEqual(pipe.sim_algo, 'cosine')
def test_algo_reset(self):
""" Tests whether an incorrect value for algo is changed to 'both'"""
pipe = SemPipeline(algo='default')
self.assertEqual(pipe.algo, 'both')
class TestUtils(TestCase):
def setUp(self):
os.mkdir('unit_tests/testing')
self.pipe = SemPipeline(files='unit_tests/testing')
self.pipe.dest = 'testing'
self.pipe.corpus = 'TESTING'
def tearDown(self):
shutil.rmtree('unit_tests/testing')
def test_produce_file_names(self):
answer = 'testing/{step}_10_lems=True_TESTING_min_occ=1_no_stops=False_weighted=True.dat'
self.assertEqual(self.pipe.produce_file_names('COOC'), answer.format(step='COOC'))
def test_make_dest_new_directory(self):
self.pipe.make_dest()
self.assertTrue(os.path.isdir('unit_tests/testing/10'))
def test_make_dest_existing_directory(self):
os.mkdir('unit_tests/testing/10')
self.pipe.make_dest()
self.assertTrue(os.path.isdir('unit_tests/testing/10'))
class TestCounter(TestCase):
def setUp(self):
self.s = 'the quick brown fox jumped over the lazy green dog'.split()
self.xml = ['<w id="Mt.1.1.1" ana="N-_----NSF-" lem="βίβλος">Βίβλος</w>',
'<w id="Mt.1.1.2" ana="N-_----GSF-" lem="γένεσις">γενέσεως</w>',
'<w id="Mt.1.1.3" ana="N-_----GSM-" lem="Ἰησοῦς">Ἰησοῦ</w>',
'<w id="Mt.1.1.4" ana="N-_----GSM-" lem="Χριστός">χριστοῦ</w>']
self.inflected_pattern = re.compile(r'.+?>([^<]*).*')
self.lem_pattern = re.compile(r'.+?lem="([^"]*).*')
def test_sentence(self):
""" Tests a sentence with a 1-word context window
"""
answer = Counter({'brown': Counter({'quick': 1, 'fox': 1}), 'green': Counter({'dog': 1, 'lazy': 1}),
'the': Counter({'quick': 1, 'over': 1, 'lazy': 1}), 'quick': Counter({'the': 1, 'brown': 1}),
'jumped': Counter({'over': 1, 'fox': 1}), 'over': Counter({'jumped': 1, 'the': 1}),
'dog': Counter({'green': 1}), 'lazy': Counter({'the': 1, 'green': 1}),
'fox': Counter({'jumped': 1, 'brown': 1})})
self.assertEqual(SemPipeline(win_size=1, c=1).word_counter(self.s, Counter()), answer)
def test_unweighted_window(self):
""" Tests a sentence with a 5 word, unweighted context window
"""
counts = SemPipeline(win_size=5, weighted=False).word_counter(self.s, Counter())
answer = Counter({'the': 2, 'quick': 1, 'brown': 1, 'fox': 1, 'over': 1, 'lazy': 1, 'green': 1, 'dog': 1})
self.assertEqual(counts['jumped'], answer)
def test_weighted_window(self):
""" Tests a sentence with a 5 word, unweighted context window
"""
counts = SemPipeline(win_size=5, weighted=True).word_counter(self.s, Counter())
answer = Counter({'the': 6, 'quick': 3, 'brown': 4, 'fox': 5, 'over': 5, 'lazy': 3, 'green': 2, 'dog': 1})
self.assertEqual(counts['jumped'], answer)
def test_min_lems(self):
""" Tests to make sure that words in the min_lems set are not counted
"""
counts = SemPipeline(win_size=5, weighted=True).word_counter(self.s, Counter(), min_lems={'green'})
answer = Counter({'the': 6, 'quick': 3, 'brown': 4, 'fox': 5, 'over': 5, 'lazy': 3, 'green': 2, 'dog': 1})
self.assertEqual(counts['jumped'], answer)
self.assertTrue('green' not in counts.keys())
def test_inflected_word_extract(self):
""" Tests to make sure that the inflected forms of words are extracted correctly from a string
"""
words = SemPipeline().word_extract(self.xml, self.inflected_pattern)
answer = ['Βίβλος', 'γενέσεως', 'Ἰησοῦ', 'χριστοῦ']
self.assertCountEqual(words, answer)
def test_lemmatized_word_extract(self):
""" Tests to make sure that the lemmatized forms of words are extracted correctly from a string
"""
words = SemPipeline().word_extract(self.xml, self.lem_pattern)
answer = ['βίβλος', 'γένεσις', 'Ἰησοῦς', 'Χριστός']
self.assertCountEqual(words, answer)
class TestAlgos(TestCase):
def setUp(self):
self.pipe = SemPipeline(files='unit_tests/data/temp_data')
self.pipe.cols = 108
self.pipe.coll_df = np.memmap(
'unit_tests/data/10/COOC_10_lems=True_cooc_test_min_occ=1_no_stops=False_weighted=True.dat',
dtype='float', mode='r', shape=(self.pipe.cols, self.pipe.cols)
)
self.ll_data = np.memmap(
'unit_tests/data/10/LL_10_lems=True_cooc_test_min_occ=1_no_stops=False_weighted=True.dat',
dtype='float', mode='r', shape=(self.pipe.cols, self.pipe.cols)
)
self.pipe.make_dest()
def tearDown(self):
shutil.rmtree(self.pipe.dest)
del self.pipe
def test_log_L(self):
k = self.pipe.coll_df[0]
n = np.sum(k)
p = np.sum(self.pipe.coll_df, axis=0) / np.sum(self.pipe.coll_df)
self.assertIsInstance(SemPipeline(algo='LL').log_L(k, n, p), np.memmap)
self.assertEqual([x.round(7) for x in SemPipeline(algo='LL').log_L(k, n, p)[:5]],
[-2.0560897, -51.7183421, -49.3194270, -3.0913858, -1.0256429])
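    # Presumably (inferred from the values checked above, not from the
    # pipeline's documentation) log_L computes the binomial log-likelihood
    # log(p**k * (1-p)**(n-k)) used in Dunning-style log-likelihood scoring;
    # log_space_L below is expected to produce the same numbers while working
    # in log space for numerical stability.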
def test_log_space_L(self):
k = self.pipe.coll_df[0]
n = np.sum(k)
p = np.sum(self.pipe.coll_df, axis=0) / np.sum(self.pipe.coll_df)
self.assertIsInstance(SemPipeline(algo='LL').log_space_L(k, n, p), np.memmap)
self.assertEqual([x.round(7) for x in SemPipeline(algo='LL').log_space_L(k, n, p)[:5]],
[-2.0560897, -51.7183421, -49.3194270, -3.0913858, -1.0256429])
def test_LL(self):
self.pipe.LL()
self.assertTrue(np.array_equal(np.round(self.pipe.LL_df, 7), np.round(self.ll_data, 7))) | gpl-3.0 |
aivarsk/scrapy | scrapy/utils/url.py | 89 | 4183 | """
This module contains general purpose URL functions not found in the standard
library.
Some of the functions that used to be imported from this module have been moved
to the w3lib.url module. Always import those from there instead.
"""
import posixpath
from six.moves.urllib.parse import (ParseResult, urlunparse, urldefrag,
urlparse, parse_qsl, urlencode,
unquote)
# scrapy.utils.url was moved to w3lib.url and import * ensures this
# move doesn't break old code
from w3lib.url import *
from w3lib.url import _safe_chars
from scrapy.utils.python import to_native_str
def url_is_from_any_domain(url, domains):
"""Return True if the url belongs to any of the given domains"""
host = parse_url(url).netloc.lower()
if not host:
return False
domains = [d.lower() for d in domains]
return any((host == d) or (host.endswith('.%s' % d)) for d in domains)
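# Informal examples for url_is_from_any_domain (the URLs are illustrative):
#   url_is_from_any_domain('http://www.example.com/some/page', ['example.com']) -> True
#   url_is_from_any_domain('http://example.org/some/page', ['example.com'])     -> False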
def url_is_from_spider(url, spider):
"""Return True if the url belongs to the given spider"""
return url_is_from_any_domain(url,
[spider.name] + list(getattr(spider, 'allowed_domains', [])))
def url_has_any_extension(url, extensions):
return posixpath.splitext(parse_url(url).path)[1].lower() in extensions
def canonicalize_url(url, keep_blank_values=True, keep_fragments=False,
encoding=None):
"""Canonicalize the given url by applying the following procedures:
- sort query arguments, first by key, then by value
- percent encode paths and query arguments. non-ASCII characters are
percent-encoded using UTF-8 (RFC-3986)
- normalize all spaces (in query arguments) '+' (plus symbol)
- normalize percent encodings case (%2f -> %2F)
- remove query arguments with blank values (unless keep_blank_values is True)
- remove fragments (unless keep_fragments is True)
The url passed can be a str or unicode, while the url returned is always a
str.
For examples see the tests in tests/test_utils_url.py
"""
scheme, netloc, path, params, query, fragment = parse_url(url)
keyvals = parse_qsl(query, keep_blank_values)
keyvals.sort()
query = urlencode(keyvals)
# XXX: copied from w3lib.url.safe_url_string to add encoding argument
# path = to_native_str(path, encoding)
# path = moves.urllib.parse.quote(path, _safe_chars, encoding='latin1') or '/'
path = safe_url_string(_unquotepath(path)) or '/'
fragment = '' if not keep_fragments else fragment
return urlunparse((scheme, netloc.lower(), path, params, query, fragment))
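# A sketch of the expected canonicalization (one representative, hand-checked
# case; the authoritative examples live in tests/test_utils_url.py):
# >>> canonicalize_url('http://www.example.com/do?c=3&b=5&b=2&a=50')
# 'http://www.example.com/do?a=50&b=2&b=5&c=3'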
def _unquotepath(path):
for reserved in ('2f', '2F', '3f', '3F'):
path = path.replace('%' + reserved, '%25' + reserved.upper())
return unquote(path)
def parse_url(url, encoding=None):
"""Return urlparsed url from the given argument (which could be an already
parsed url)
"""
if isinstance(url, ParseResult):
return url
return urlparse(to_native_str(url, encoding))
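# Example (hypothetical URL): a string gets parsed, while an existing
# ParseResult is returned as-is:
# >>> parse_url('http://www.example.com/index.html').netloc
# 'www.example.com'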
def escape_ajax(url):
"""
    Return the crawlable url according to:
http://code.google.com/web/ajaxcrawling/docs/getting-started.html
>>> escape_ajax("www.example.com/ajax.html#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?k1=v1&k2=v2#!key=value")
'www.example.com/ajax.html?k1=v1&k2=v2&_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html?#!key=value")
'www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue'
>>> escape_ajax("www.example.com/ajax.html#!")
'www.example.com/ajax.html?_escaped_fragment_='
URLs that are not "AJAX crawlable" (according to Google) returned as-is:
>>> escape_ajax("www.example.com/ajax.html#key=value")
'www.example.com/ajax.html#key=value'
>>> escape_ajax("www.example.com/ajax.html#")
'www.example.com/ajax.html#'
>>> escape_ajax("www.example.com/ajax.html")
'www.example.com/ajax.html'
"""
defrag, frag = urldefrag(url)
if not frag.startswith('!'):
return url
return add_or_replace_parameter(defrag, '_escaped_fragment_', frag[1:])
| bsd-3-clause |
squidsoup/pip | pip/req/req_install.py | 5 | 42989 | from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import warnings
import zipfile
from distutils.util import change_root
from distutils import sysconfig
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.distlib.markers import interpret as markers_interpret
from pip._vendor.six.moves import configparser
import pip.wheel
from pip.compat import native_str, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path, make_path_relative,
call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir,
get_installed_version
)
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel
from pip._vendor.packaging.version import Version
logger = logging.getLogger(__name__)
def _strip_extras(path):
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
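# Illustrative behaviour (hypothetical project names/paths):
# >>> _strip_extras('path/to/AnyProject[extra1,extra2]')
# ('path/to/AnyProject', '[extra1,extra2]')
# >>> _strip_extras('AnyProject')
# ('AnyProject', None)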
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
link=None, as_egg=False, update=True, editable_options=None,
pycompile=True, markers=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
self.extras = ()
if isinstance(req, six.string_types):
req = pkg_resources.Requirement.parse(req)
self.extras = req.extras
self.req = req
self.comes_from = comes_from
self.constraint = constraint
self.source_dir = source_dir
self.editable = editable
if editable_options is None:
editable_options = {}
self.editable_options = editable_options
self._wheel_cache = wheel_cache
self.link = link
self.as_egg = as_egg
self.markers = markers
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
# This hold the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
# Temporary build location
self._temp_build_dir = None
# Used to store the global directory where the _temp_build_dir should
# have been created. Cf _correct_build_location method.
self._ideal_global_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
self.use_user_site = False
self.target_dir = None
self.options = options if options else {}
self.pycompile = pycompile
# Set to True after successful preparation of this requirement
self.prepared = False
self.isolated = isolated
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None,
isolated=False, options=None, wheel_cache=None,
constraint=False):
from pip.index import Link
name, url, extras_override, editable_options = parse_editable(
editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
link=Link(url),
constraint=constraint,
editable_options=editable_options,
isolated=isolated,
options=options if options else {},
wheel_cache=wheel_cache)
if extras_override is not None:
res.extras = extras_override
return res
@classmethod
def from_line(
cls, name, comes_from=None, isolated=False, options=None,
wheel_cache=None, constraint=False):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
from pip.index import Link
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers = name.split(marker_sep, 1)
markers = markers.strip()
if not markers:
markers = None
else:
markers = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
extras = None
if is_url(name):
link = Link(name)
else:
p, extras = _strip_extras(path)
if (os.path.isdir(p) and
(os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(p):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' "
"not found." % name
)
link = Link(path_to_url(p))
elif is_archive_file(p):
if not os.path.isfile(p):
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
link = Link(path_to_url(p))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel on this platform." %
wheel.filename
)
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
options = options if options else {}
res = cls(req, comes_from, link=link, markers=markers,
isolated=isolated, options=options,
wheel_cache=wheel_cache, constraint=constraint)
if extras:
res.extras = pkg_resources.Requirement.parse('__placeholder__' +
extras).extras
return res
def __str__(self):
if self.req:
s = str(self.req)
if self.link:
s += ' from %s' % self.link.url
else:
s = self.link.url if self.link else None
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def __repr__(self):
return '<%s object: %s editable=%r>' % (
self.__class__.__name__, str(self), self.editable)
def populate_link(self, finder, upgrade):
"""Ensure that if a link can be found for this, that it is found.
        Note that self.link may still be None - if upgrade is False and the
requirement is already installed.
"""
if self.link is None:
self.link = finder.find_requirement(self, upgrade)
@property
def link(self):
return self._link
@link.setter
def link(self, link):
# Lookup a cached wheel, if possible.
if self._wheel_cache is None:
self._link = link
else:
self._link = self._wheel_cache.cached_wheel(link, self.name)
@property
def specifier(self):
return self.req.specifier
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
# for requirement via a path to a directory: the name of the
# package is not available yet so we create a temp directory
            # Once run_egg_info has run, we'll be able
# to fix it via _correct_build_location
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug('Creating directory %s', build_dir)
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def _correct_build_location(self):
"""Move self._temp_build_dir to self._ideal_build_dir/self.req.name
For some requirements (e.g. a path to a directory), the name of the
package is not available until we run egg_info, so the build_location
will return a temporary directory and store the _ideal_build_dir.
This is only called by self.egg_info_path to fix the temporary build
directory.
"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
assert self._ideal_build_dir
old_location = self._temp_build_dir
self._temp_build_dir = None
new_location = self.build_location(self._ideal_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self._ideal_build_dir = None
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(self.req.project_name)
@property
def setup_py(self):
assert self.source_dir, "No source dir for %s" % self
try:
import setuptools # noqa
except ImportError:
# Setuptools is not available
raise InstallationError(
"setuptools must be installed to install from a source "
"distribution"
)
setup_file = 'setup.py'
if self.editable_options and 'subdirectory' in self.editable_options:
setup_py = os.path.join(self.source_dir,
self.editable_options['subdirectory'],
setup_file)
else:
setup_py = os.path.join(self.source_dir, setup_file)
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.debug(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.debug(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.link,
)
with indent_log():
script = self._run_setup_py
script = script.replace('__SETUP_PY__', repr(self.setup_py))
script = script.replace('__PKG_NAME__', repr(self.name))
base_cmd = [sys.executable, '-c', script]
if self.isolated:
base_cmd += ["--no-user-cfg"]
egg_info_cmd = base_cmd + ['egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
ensure_dir(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=cwd,
show_stdout=False,
command_level=logging.DEBUG,
command_desc='python setup.py egg_info')
if not self.req:
if isinstance(
pkg_resources.parse_version(self.pkg_info()["Version"]),
Version):
op = "=="
else:
op = "==="
self.req = pkg_resources.Requirement.parse(
"".join([
self.pkg_info()["Name"],
op,
self.pkg_info()["Version"],
]))
self._correct_build_location()
# FIXME: This is a lame hack, entirely for PasteScript which has
# a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
import tokenize
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(
getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'),
__file__,
'exec'
))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.exists(
os.path.join(root, dir, 'bin', 'python')
) or
os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
assert filenames, \
"No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep) +
(os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
_requirements_section_re = re.compile(r'\[(.*?)\]')
@property
def installed_version(self):
return get_installed_version(self.name)
def assert_source_matches_version(self):
assert self.source_dir
version = self.pkg_info()['version']
if version not in self.req:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == 'file':
# Static paths don't get updated
return
assert '+' in self.link.url, "bad url: %r" % self.link.url
if not self.update:
return
vc_type, url = self.link.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.link.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.link, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError(
"Cannot uninstall requirement %s, not installed" % (self.name,)
)
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
develop_egg_link = egg_link_path(dist)
develop_egg_link_egg_info = '{0}.egg-info'.format(
pkg_resources.to_filename(dist.project_name))
egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
# Special case for distutils installed package
distutils_egg_info = getattr(dist._provider, 'path', None)
        # The order of the uninstall cases matters: with 2 installs of the
        # same package, pip needs to uninstall the currently detected version
if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
not dist.egg_info.endswith(develop_egg_link_egg_info)):
# if dist.egg_info.endswith(develop_egg_link_egg_info), we
# are in fact in the develop_egg_link case
paths_to_remove.add(dist.egg_info)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(dist.egg_info, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif distutils_egg_info:
warnings.warn(
"Uninstalling a distutils installed project ({0}) has been "
"deprecated and will be removed in a future version. This is "
"due to the fact that uninstalling a distutils project will "
"only partially uninstall the project.".format(self.name),
RemovedInPip8Warning,
)
paths_to_remove.add(distutils_egg_info)
elif dist.location.endswith('.egg'):
# package installed by easy_install
# We cannot match on dist.egg_name because it can slightly vary
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
paths_to_remove.add(dist.location)
easy_install_egg = os.path.split(dist.location)[1]
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, self.name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
else:
logger.debug(
'Not sure how to uninstall: %s - Check: %s',
dist, dist.location)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = configparser.SafeConfigParser()
config.readfp(
FakeFile(dist.get_metadata_lines('entry_points.txt'))
)
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, name))
if WINDOWS:
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe.manifest'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '-script.py'
)
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.project_name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
else:
logger.error(
"Can't commit %s, nothing uninstalled.", self.project_name,
)
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.source_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
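    # Rough illustration of the cleaning above (hypothetical POSIX paths):
    #   name='/tmp/build/pkg/sub/mod.py', prefix='/tmp/build/pkg' -> 'sub/mod.py'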
def match_markers(self):
if self.markers is not None:
return markers_interpret(self.markers)
else:
return True
def install(self, install_options, global_options=[], root=None):
if self.editable:
self.install_editable(install_options, global_options)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root)
self.install_succeeded = True
return
# Extend the list of global and install options passed on to
# the setup.py call with the ones from the requirements file.
# Options specified in requirements file override those
# specified on the command line, since the last option given
# to setup.py is the one that is used.
global_options += self.options.get('global_options', [])
install_options += self.options.get('install_options', [])
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [sys.executable]
install_args.append('-c')
install_args.append(
"import setuptools, tokenize;__file__=%r;"
"exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
".replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
)
install_args += list(global_options) + \
['install', '--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if self.pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str, self.name)]
logger.info('Running setup.py install for %s', self.name)
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.source_dir,
show_stdout=False,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
                # there's no --always-unzip option we can pass to install
                # command so we are unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
make_path_relative(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
rmtree(temp_location)
def ensure_has_source_dir(self, parent_dir):
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.build_location(parent_dir)
return self.source_dir
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.source_dir and os.path.exists(
os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
logger.debug('Removing source in %s', self.source_dir)
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
logger.info('Running setup.py develop for %s', self.name)
if self.isolated:
global_options = list(global_options) + ["--no-user-cfg"]
with indent_log():
# FIXME: should we do --install-headers here too?
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
[
sys.executable,
'-c',
"import setuptools, tokenize; __file__=%r; exec(compile("
"getattr(tokenize, 'open', open)(__file__).read().replace"
"('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
] +
list(global_options) +
['develop', '--no-deps'] +
list(install_options),
cwd=cwd,
show_stdout=False)
self.install_succeeded = True
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately.
"""
if self.req is None:
return False
try:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.project_name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv() and
dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.link and self.link.is_wheel
def move_wheel_files(self, wheeldir, root=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
pycompile=self.pycompile,
isolated=self.isolated,
)
def get_dist(self):
"""Return a pkg_resources.Distribution built from self.egg_info_path"""
egg_info = self.egg_info_path('').rstrip('/')
base_dir = os.path.dirname(egg_info)
metadata = pkg_resources.PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
return pkg_resources.Distribution(
os.path.dirname(egg_info),
project_name=dist_name,
metadata=metadata)
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
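# Illustrative behaviour (hypothetical requirement names):
# >>> _strip_postfix('AnyProject-dev')
# 'AnyProject'
# >>> _strip_postfix('AnyProject-0.2')
# 'AnyProject'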
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
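# Illustrative behaviour (hypothetical SVN-style URLs):
# >>> _build_req_from_url('http://svn.example.com/AnyProject/tags/1.0')
# 'AnyProject'
# >>> _build_req_from_url('http://svn.example.com/AnyProject/trunk')
# 'AnyProject'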
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
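# Illustrative behaviour (hypothetical editable URL); fragment/query style
# options are collected into a dict (key order may vary):
# >>> _build_editable_options('git+https://example.com/repo.git#egg=AnyProject&subdirectory=sub')
# {'egg': 'AnyProject', 'subdirectory': 'sub'}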
def parse_editable(editable_req, default_vcs=None):
"""Parses an editable requirement into:
- a requirement name
        - a URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
if extras:
return (
None,
url_no_extras,
pkg_resources.Requirement.parse(
'__placeholder__' + extras
).extras,
{},
)
else:
return None, url_no_extras, None, {}
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
try:
options = _build_editable_options(editable_req)
except Exception as exc:
raise InstallationError(
'--editable=%s error in editable options:%s' % (editable_req, exc)
)
if not options or 'egg' not in options:
req = _build_req_from_url(editable_req)
if not req:
raise InstallationError(
'--editable=%s is not the right format; it must have '
'#egg=Package' % editable_req
)
else:
req = options['egg']
package = _strip_postfix(req)
return package, url, None, options
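# Example of the tuple produced above (hypothetical VCS URL):
# >>> parse_editable('git+https://example.com/repo.git#egg=AnyProject')
# ('AnyProject', 'git+https://example.com/repo.git#egg=AnyProject', None, {'egg': 'AnyProject'})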
| mit |
jdepoix/goto_cloud | goto_cloud/operating_system_support/tests/test_operating_system_support.py | 1 | 2975 | import unittest
from django.test import TestCase
from operating_system.public import OperatingSystem
from remote_host.public import RemoteHost
from ..operating_system_support import OperatingSystemRelations, AbstractedRemoteHostOperator
RELATION_MOCK = {
'1': {
'2': {
'3': {
'4': '5',
'6': '7',
}
},
'8': '9',
'10': '11',
},
'12': {
'13': '14',
'15': '16',
}
}
class TestOperationA():
pass
class TestOperationB():
pass
class AbstractedRemoteHostOperatorTestImplementation(AbstractedRemoteHostOperator):
def _get_operating_systems_to_supported_operation_mapping(self):
return {
('2', '8',): TestOperationA,
('13',): TestOperationB,
}
def _init_operator_class(self, operator_class):
return operator_class()
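# Sketch of the expected dispatch given the mock relations above: a host whose
# os is '4' should resolve to TestOperationA, because '4' is a subsystem of '2'
# (one of the keys in the mapping), e.g.:
#   AbstractedRemoteHostOperatorTestImplementation(
#       RemoteHost.objects.create(os='4')).operator  # -> TestOperationA instance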
class TestOperatingSystemRelations(unittest.TestCase):
def setUp(self):
OperatingSystemRelations._RELATIONS = RELATION_MOCK
def test_get_subsystems(self):
self.assertEqual(
set(OperatingSystemRelations('2').get_subsystems()),
{'3', '4', '5', '6', '7'}
)
def test_get_subsystems__no_subsystems(self):
        self.assertEqual(OperatingSystemRelations('11').get_subsystems(), [])
def test_get_subsystems__no_relations_known(self):
        self.assertEqual(OperatingSystemRelations('999').get_subsystems(), [])
def test_is_parent_of(self):
self.assertTrue(OperatingSystemRelations('3').is_parent_of('5'))
def test_is_child_of(self):
self.assertTrue(OperatingSystemRelations('3').is_child_of('1'))
class TestAbstractedRemoteHostOperator(TestCase):
def setUp(self):
OperatingSystemRelations._RELATIONS = RELATION_MOCK
def test_initialization(self):
self.assertTrue(
isinstance(
AbstractedRemoteHostOperatorTestImplementation(RemoteHost.objects.create(os='8')).operator,
TestOperationA
)
)
self.assertTrue(
isinstance(
AbstractedRemoteHostOperatorTestImplementation(RemoteHost.objects.create(os='13')).operator,
TestOperationB
)
)
def test_initialization__related(self):
self.assertTrue(
isinstance(
AbstractedRemoteHostOperatorTestImplementation(RemoteHost.objects.create(os='4')).operator,
TestOperationA
)
)
self.assertTrue(
isinstance(
AbstractedRemoteHostOperatorTestImplementation(RemoteHost.objects.create(os='14')).operator,
TestOperationB
)
)
def test_initialization__not_supported(self):
with self.assertRaises(OperatingSystem.NotSupportedException):
AbstractedRemoteHostOperatorTestImplementation(RemoteHost.objects.create(os='16'))
| mit |
matthiasdiener/spack | var/spack/repos/builtin/packages/dri3proto/package.py | 5 | 1811 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Dri3proto(AutotoolsPackage):
"""Direct Rendering Infrastructure 3 Extension.
This extension defines a protocol to securely allow user applications to
access the video hardware without requiring data to be passed through the
X server."""
homepage = "https://cgit.freedesktop.org/xorg/proto/dri3proto/"
url = "https://www.x.org/releases/individual/proto/dri3proto-1.0.tar.gz"
version('1.0', '25e84a49a076862277ee12aebd49ff5f')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
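    # Typical command-line usage for this package (illustrative):
    #   $ spack spec dri3proto
    #   $ spack install dri3proto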
| lgpl-2.1 |
malkoto1/just_cook | SQLAlchemy-1.0.4/lib/sqlalchemy/events.py | 2 | 42935 | # sqlalchemy/events.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event, exc
from .pool import Pool
from .engine import Connectable, Engine, Dialect
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
subclasses, including :class:`.MetaData`, :class:`.Table`,
:class:`.Column`.
:class:`.MetaData` and :class:`.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
    with a parent, such as when a :class:`.Column` is associated
with its :class:`.Table`, when a :class:`.ForeignKeyConstraint`
is associated with a :class:`.Table`, etc.
Example using the ``after_create`` event::
from sqlalchemy import event
        from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
See also:
:ref:`event_toplevel`
:class:`.DDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
"""
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(self, target, connection, **kw):
"""Called before CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_create(self, target, connection, **kw):
"""Called after CREATE statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_drop(self, target, connection, **kw):
"""Called before DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_drop(self, target, connection, **kw):
"""Called after DROP statements are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def column_reflect(self, inspector, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`.Table` is being reflected.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`.
The event is called before any action is taken against
this dictionary, and the contents can be modified.
The :class:`.Column` specific arguments ``info``, ``key``,
and ``quote`` can also be added to the dictionary and
will be passed to the constructor of :class:`.Column`.
Note that this event is only meaningful if either
associated with the :class:`.Table` class across the
board, e.g.::
from sqlalchemy.schema import Table
from sqlalchemy import event
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
event.listen(
Table,
'column_reflect',
listen_for_reflect)
...or with a specific :class:`.Table` instance using
the ``listeners`` argument::
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
        This is because the reflection process initiated by ``autoload=True``
completes within the scope of the constructor for :class:`.Table`.
"""
class PoolEvents(event.Events):
"""Available events for :class:`.Pool`.
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
e.g.::
from sqlalchemy import event
def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
"handle an on checkout event"
event.listen(Pool, 'checkout', my_on_checkout)
In addition to accepting the :class:`.Pool` class and
:class:`.Pool` instances, :class:`.PoolEvents` also accepts
:class:`.Engine` objects and the :class:`.Engine` class as
targets, which will be resolved to the ``.pool`` attribute of the
given engine or the :class:`.Pool` class::
engine = create_engine("postgresql://scott:tiger@localhost/test")
# will associate with engine.pool
event.listen(engine, 'checkout', my_on_checkout)
"""
_target_class_doc = "SomeEngineOrPool"
_dispatch_target = Pool
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Pool
elif issubclass(target, Pool):
return target
elif isinstance(target, Engine):
return target.pool
else:
return target
def connect(self, dbapi_connection, connection_record):
"""Called at the moment a particular DBAPI connection is first
created for a given :class:`.Pool`.
This event allows one to capture the point directly after which
the DBAPI module-level ``.connect()`` method has been used in order
to produce a new DBAPI connection.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def first_connect(self, dbapi_connection, connection_record):
"""Called exactly once for the first time a DBAPI connection is
checked out from a particular :class:`.Pool`.
The rationale for :meth:`.PoolEvents.first_connect` is to determine
information about a particular series of database connections based
on the settings used for all connections. Since a particular
:class:`.Pool` refers to a single "creator" function (which in terms
of a :class:`.Engine` refers to the URL and connection options used),
it is typically valid to make observations about a single connection
that can be safely assumed to be valid about all subsequent
connections, such as the database version, the server and client
encoding settings, collation settings, and many others.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a connection is retrieved from the Pool.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param connection_proxy: the :class:`._ConnectionFairy` object which
will proxy the public interface of the DBAPI connection for the
lifespan of the checkout.
If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
connection will be disposed and a fresh connection retrieved.
Processing of all checkout listeners will abort and restart
using the new connection.
.. seealso:: :meth:`.ConnectionEvents.engine_connect` - a similar event
which occurs upon creation of a new :class:`.Connection`.
"""
def checkin(self, dbapi_connection, connection_record):
"""Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
connection has been invalidated. ``checkin`` will not be called
for detached connections. (They do not return to the pool.)
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def reset(self, dbapi_connection, connection_record):
"""Called before the "reset" action occurs for a pooled connection.
This event represents
when the ``rollback()`` method is called on the DBAPI connection
before it is returned to the pool. The behavior of "reset" can
be controlled, including disabled, using the ``reset_on_return``
pool argument.
The :meth:`.PoolEvents.reset` event is usually followed by the
        :meth:`.PoolEvents.checkin` event, except in those
cases where the connection is discarded immediately after reset.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
.. versionadded:: 0.8
.. seealso::
:meth:`.ConnectionEvents.rollback`
:meth:`.ConnectionEvents.commit`
"""
def invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked, either from API usage or via "auto-invalidation",
without the ``soft`` flag.
The event occurs before a final attempt to call ``.close()`` on the
connection occurs.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param exception: the exception object corresponding to the reason
for this invalidation, if any. May be ``None``.
.. versionadded:: 0.9.2 Added support for connection invalidation
listening.
.. seealso::
:ref:`pool_connection_invalidation`
"""
def soft_invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "soft invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked with the ``soft`` flag.
Soft invalidation refers to when the connection record that tracks
this connection will force a reconnect after the current connection
is checked in. It does not actively close the dbapi_connection
at the point at which it is called.
.. versionadded:: 1.0.3
"""
class ConnectionEvents(event.Events):
"""Available events for :class:`.Connectable`, which includes
:class:`.Connection` and :class:`.Engine`.
The methods here define the name of an event as well as the names of
members that are passed to listener functions.
An event listener can be associated with any :class:`.Connectable`
class or instance, such as an :class:`.Engine`, e.g.::
from sqlalchemy import event, create_engine
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
log.info("Received statement: %s" % statement)
engine = create_engine('postgresql://scott:tiger@localhost/test')
event.listen(engine, "before_cursor_execute", before_cursor_execute)
or with a specific :class:`.Connection`::
with engine.begin() as conn:
@event.listens_for(conn, 'before_cursor_execute')
def before_cursor_execute(conn, cursor, statement, parameters,
context, executemany):
log.info("Received statement: %s" % statement)
When the methods are called with a `statement` parameter, such as in
:meth:`.after_cursor_execute`, :meth:`.before_cursor_execute` and
:meth:`.dbapi_error`, the statement is the exact SQL string that was
prepared for transmission to the DBAPI ``cursor`` in the connection's
:class:`.Dialect`.
The :meth:`.before_execute` and :meth:`.before_cursor_execute`
events can also be established with the ``retval=True`` flag, which
allows modification of the statement and parameters to be sent
to the database. The :meth:`.before_cursor_execute` event is
particularly useful here to add ad-hoc string transformations, such
as comments, to all executions::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def comment_sql_calls(conn, cursor, statement, parameters,
context, executemany):
statement = statement + " -- some comment"
return statement, parameters
.. note:: :class:`.ConnectionEvents` can be established on any
combination of :class:`.Engine`, :class:`.Connection`, as well
as instances of each of those classes. Events across all
four scopes will fire off for a given instance of
:class:`.Connection`. However, for performance reasons, the
:class:`.Connection` object determines at instantiation time
whether or not its parent :class:`.Engine` has event listeners
established. Event listeners added to the :class:`.Engine`
class or to an instance of :class:`.Engine` *after* the instantiation
of a dependent :class:`.Connection` instance will usually
*not* be available on that :class:`.Connection` instance. The newly
added listeners will instead take effect for :class:`.Connection`
instances created subsequent to those event listeners being
established on the parent :class:`.Engine` class or instance.
:param retval=False: Applies to the :meth:`.before_execute` and
:meth:`.before_cursor_execute` events only. When True, the
user-defined event function must have a return value, which
is a tuple of parameters that replace the given statement
and parameters. See those methods for a description of
specific return arguments.
.. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated
with any :class:`.Connectable` including :class:`.Connection`,
in addition to the existing support for :class:`.Engine`.
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Connectable
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, \
event_key._listen_fn
target._has_events = True
if not retval:
if identifier == 'before_execute':
orig_fn = fn
def wrap_before_execute(conn, clauseelement,
multiparams, params):
orig_fn(conn, clauseelement, multiparams, params)
return clauseelement, multiparams, params
fn = wrap_before_execute
elif identifier == 'before_cursor_execute':
orig_fn = fn
def wrap_before_cursor_execute(conn, cursor, statement,
parameters, context,
executemany):
orig_fn(conn, cursor, statement,
parameters, context, executemany)
return statement, parameters
fn = wrap_before_cursor_execute
elif retval and \
identifier not in ('before_execute',
'before_cursor_execute', 'handle_error'):
raise exc.ArgumentError(
"Only the 'before_execute', "
"'before_cursor_execute' and 'handle_error' engine "
"event listeners accept the 'retval=True' "
"argument.")
event_key.with_wrapper(fn).base_listen()
def before_execute(self, conn, clauseelement, multiparams, params):
"""Intercept high level execute() events, receiving uncompiled
SQL constructs and other objects prior to rendering into SQL.
This event is good for debugging SQL compilation issues as well
as early manipulation of the parameters being sent to the database,
as the parameter lists will be in a consistent format here.
This event can be optionally established with the ``retval=True``
flag. The ``clauseelement``, ``multiparams``, and ``params``
arguments should be returned as a three-tuple in this case::
@event.listens_for(Engine, "before_execute", retval=True)
            def before_execute(conn, clauseelement, multiparams, params):
# do something with clauseelement, multiparams, params
return clauseelement, multiparams, params
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
See also:
:meth:`.before_cursor_execute`
"""
def after_execute(self, conn, clauseelement, multiparams, params, result):
"""Intercept high level execute() events after execute.
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
:param result: :class:`.ResultProxy` generated by the execution.
"""
def before_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events before execution,
receiving the string SQL statement and DBAPI-specific parameter list to
be invoked against a cursor.
This event is a good choice for logging as well as late modifications
to the SQL string. It's less ideal for parameter modifications except
for those which are specific to a target backend.
This event can be optionally established with the ``retval=True``
flag. The ``statement`` and ``parameters`` arguments should be
returned as a two-tuple in this case::
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def before_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
# do something with statement, parameters
return statement, parameters
See the example at :class:`.ConnectionEvents`.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement, as to be passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
See also:
:meth:`.before_execute`
:meth:`.after_cursor_execute`
"""
def after_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events after execution.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object. Will have results pending
if the statement was a SELECT, but these should not be consumed
as they will be needed by the :class:`.ResultProxy`.
:param statement: string SQL statement, as passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
"""
def dbapi_error(self, conn, cursor, statement, parameters,
context, exception):
"""Intercept a raw DBAPI error.
This event is called with the DBAPI exception instance
received from the DBAPI itself, *before* SQLAlchemy wraps the
        exception with its own exception wrappers, and before any
other operations are performed on the DBAPI cursor; the
existing transaction remains in effect as well as any state
on the cursor.
The use case here is to inject low-level exception handling
into an :class:`.Engine`, typically for logging and
debugging purposes.
.. warning::
Code should **not** modify
any state or throw any exceptions here as this will
interfere with SQLAlchemy's cleanup and error handling
routines. For exception modification, please refer to the
new :meth:`.ConnectionEvents.handle_error` event.
Subsequent to this hook, SQLAlchemy may attempt any
number of operations on the connection/cursor, including
closing the cursor, rolling back of the transaction in the
case of connectionless execution, and disposing of the entire
connection pool if a "disconnect" was detected. The
exception is then wrapped in a SQLAlchemy DBAPI exception
wrapper and re-thrown.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement, as passed to the DBAPI
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param exception: The **unwrapped** exception emitted directly from the
DBAPI. The class here is specific to the DBAPI module in use.
.. deprecated:: 0.9.7 - replaced by
:meth:`.ConnectionEvents.handle_error`
"""
def handle_error(self, exception_context):
"""Intercept all exceptions processed by the :class:`.Connection`.
This includes all exceptions emitted by the DBAPI as well as
within SQLAlchemy's statement invocation process, including
encoding errors and other statement validation errors. Other areas
in which the event is invoked include transaction begin and end,
result row fetching, cursor creation.
Note that :meth:`.handle_error` may support new kinds of exceptions
and new calling scenarios at *any time*. Code which uses this
event must expect new calling patterns to be present in minor
releases.
To support the wide variety of members that correspond to an exception,
as well as to allow extensibility of the event without backwards
incompatibility, the sole argument received is an instance of
:class:`.ExceptionContext`. This object contains data members
representing detail about the exception.
Use cases supported by this hook include:
* read-only, low-level exception handling for logging and
debugging purposes
* exception re-writing
The hook is called while the cursor from the failed operation
(if any) is still open and accessible. Special cleanup operations
can be called on this cursor; SQLAlchemy will attempt to close
this cursor subsequent to this hook being invoked. If the connection
is in "autocommit" mode, the transaction also remains open within
the scope of this hook; the rollback of the per-statement transaction
also occurs after the hook is called.
The user-defined event handler has two options for replacing
the SQLAlchemy-constructed exception into one that is user
defined. It can either raise this new exception directly, in
which case all further event listeners are bypassed and the
        exception will be raised, after appropriate cleanup has taken
place::
@event.listens_for(Engine, "handle_error")
def handle_exception(context):
if isinstance(context.original_exception,
psycopg2.OperationalError) and \\
"failed" in str(context.original_exception):
raise MySpecialException("failed operation")
.. warning:: Because the :meth:`.ConnectionEvents.handle_error`
event specifically provides for exceptions to be re-thrown as
the ultimate exception raised by the failed statement,
**stack traces will be misleading** if the user-defined event
handler itself fails and throws an unexpected exception;
the stack trace may not illustrate the actual code line that
failed! It is advised to code carefully here and use
logging and/or inline debugging if unexpected exceptions are
occurring.
Alternatively, a "chained" style of event handling can be
used, by configuring the handler with the ``retval=True``
modifier and returning the new exception instance from the
function. In this case, event handling will continue onto the
next handler. The "chained" exception is available using
:attr:`.ExceptionContext.chained_exception`::
@event.listens_for(Engine, "handle_error", retval=True)
def handle_exception(context):
if context.chained_exception is not None and \\
"special" in context.chained_exception.message:
return MySpecialException("failed",
cause=context.chained_exception)
Handlers that return ``None`` may remain within this chain; the
last non-``None`` return value is the one that continues to be
passed to the next handler.
When a custom exception is raised or returned, SQLAlchemy raises
        this new exception as-is; it is not wrapped by any SQLAlchemy
object. If the exception is not a subclass of
:class:`sqlalchemy.exc.StatementError`,
certain features may not be available; currently this includes
the ORM's feature of adding a detail hint about "autoflush" to
exceptions raised within the autoflush process.
:param context: an :class:`.ExceptionContext` object. See this
class for details on all available members.
.. versionadded:: 0.9.7 Added the
:meth:`.ConnectionEvents.handle_error` hook.
.. versionchanged:: 1.0.0 The :meth:`.handle_error` event is now
invoked when an :class:`.Engine` fails during the initial
call to :meth:`.Engine.connect`, as well as when a
:class:`.Connection` object encounters an error during a
reconnect operation.
.. versionchanged:: 1.0.0 The :meth:`.handle_error` event is
not fired off when a dialect makes use of the
``skip_user_error_events`` execution option. This is used
by dialects which intend to catch SQLAlchemy-specific exceptions
within specific operations, such as when the MySQL dialect detects
a table not present within the ``has_table()`` dialect method.
Prior to 1.0.0, code which implements :meth:`.handle_error` needs
to ensure that exceptions thrown in these scenarios are re-raised
without modification.
"""
def engine_connect(self, conn, branch):
"""Intercept the creation of a new :class:`.Connection`.
This event is called typically as the direct result of calling
the :meth:`.Engine.connect` method.
It differs from the :meth:`.PoolEvents.connect` method, which
refers to the actual connection to a database at the DBAPI level;
a DBAPI connection may be pooled and reused for many operations.
In contrast, this event refers only to the production of a higher level
:class:`.Connection` wrapper around such a DBAPI connection.
It also differs from the :meth:`.PoolEvents.checkout` event
in that it is specific to the :class:`.Connection` object, not the
DBAPI connection that :meth:`.PoolEvents.checkout` deals with, although
this DBAPI connection is available here via the
:attr:`.Connection.connection` attribute. But note there can in fact
be multiple :meth:`.PoolEvents.checkout` events within the lifespan
of a single :class:`.Connection` object, if that :class:`.Connection`
is invalidated and re-established. There can also be multiple
:class:`.Connection` objects generated for the same already-checked-out
DBAPI connection, in the case that a "branch" of a :class:`.Connection`
is produced.
:param conn: :class:`.Connection` object.
:param branch: if True, this is a "branch" of an existing
:class:`.Connection`. A branch is generated within the course
of a statement execution to invoke supplemental statements, most
typically to pre-execute a SELECT of a default value for the purposes
of an INSERT statement.
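        An illustrative sketch of a listener using this event; the listener
        name, the use of ``Connection.info`` and the ``time`` module are
        example choices, not requirements of the event::

            @event.listens_for(Engine, "engine_connect")
            def receive_engine_connect(conn, branch):
                if branch:
                    # a branched Connection reuses the DBAPI connection of
                    # its parent, which was already handled
                    return
                conn.info['connected_at'] = time.time()  # assumes "import time"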
.. versionadded:: 0.9.0
.. seealso::
:meth:`.PoolEvents.checkout` the lower-level pool checkout event
for an individual DBAPI connection
:meth:`.ConnectionEvents.set_connection_execution_options` - a copy
of a :class:`.Connection` is also made when the
:meth:`.Connection.execution_options` method is called.
"""
def set_connection_execution_options(self, conn, opts):
"""Intercept when the :meth:`.Connection.execution_options`
method is called.
This method is called after the new :class:`.Connection` has been
produced, with the newly updated execution options collection, but
before the :class:`.Dialect` has acted upon any of those new options.
Note that this method is not called when a new :class:`.Connection`
is produced which is inheriting execution options from its parent
:class:`.Engine`; to intercept this condition, use the
:meth:`.ConnectionEvents.engine_connect` event.
:param conn: The newly copied :class:`.Connection` object
:param opts: dictionary of options that were passed to the
:meth:`.Connection.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_engine_execution_options` - event
which is called when :meth:`.Engine.execution_options` is called.
"""
def set_engine_execution_options(self, engine, opts):
"""Intercept when the :meth:`.Engine.execution_options`
method is called.
The :meth:`.Engine.execution_options` method produces a shallow
copy of the :class:`.Engine` which stores the new options. That new
:class:`.Engine` is passed here. A particular application of this
method is to add a :meth:`.ConnectionEvents.engine_connect` event
handler to the given :class:`.Engine` which will perform some per-
:class:`.Connection` task specific to these execution options.
        :param engine: The newly copied :class:`.Engine` object
        :param opts: dictionary of options that were passed to the
         :meth:`.Engine.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_connection_execution_options` - event
which is called when :meth:`.Connection.execution_options` is
called.
"""
def begin(self, conn):
"""Intercept begin() events.
:param conn: :class:`.Connection` object
"""
def rollback(self, conn):
"""Intercept rollback() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` also "auto-rolls back"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to its default value of ``'rollback'``.
To intercept this
rollback, use the :meth:`.PoolEvents.reset` hook.
:param conn: :class:`.Connection` object
.. seealso::
:meth:`.PoolEvents.reset`
"""
def commit(self, conn):
"""Intercept commit() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` may also "auto-commit"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to the value ``'commit'``. To intercept this
commit, use the :meth:`.PoolEvents.reset` hook.
:param conn: :class:`.Connection` object
"""
def savepoint(self, conn, name):
"""Intercept savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
"""
def rollback_savepoint(self, conn, name, context):
"""Intercept rollback_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def release_savepoint(self, conn, name, context):
"""Intercept release_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def begin_twophase(self, conn, xid):
"""Intercept begin_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def prepare_twophase(self, conn, xid):
"""Intercept prepare_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def rollback_twophase(self, conn, xid, is_prepared):
"""Intercept rollback_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
def commit_twophase(self, conn, xid, is_prepared):
"""Intercept commit_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
class DialectEvents(event.Events):
"""event interface for execution-replacement functions.
These events allow direct instrumentation and replacement
of key dialect functions which interact with the DBAPI.
.. note::
:class:`.DialectEvents` hooks should be considered **semi-public**
and experimental.
These hooks are not for general use and are only for those situations
where intricate re-statement of DBAPI mechanics must be injected onto
an existing dialect. For general-use statement-interception events,
please use the :class:`.ConnectionEvents` interface.
.. seealso::
:meth:`.ConnectionEvents.before_cursor_execute`
:meth:`.ConnectionEvents.before_execute`
:meth:`.ConnectionEvents.after_cursor_execute`
:meth:`.ConnectionEvents.after_execute`
.. versionadded:: 0.9.4
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Dialect
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
target._has_events = True
event_key.base_listen()
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Dialect
elif issubclass(target, Dialect):
return target
elif isinstance(target, Engine):
return target.dialect
else:
return target
def do_connect(self, dialect, conn_rec, cargs, cparams):
"""Receive connection arguments before a connection is made.
Return a DBAPI connection to halt further events from invoking;
the returned connection will be used.
Alternatively, the event can manipulate the cargs and/or cparams
collections; cargs will always be a Python list that can be mutated
in-place and cparams a Python dictionary. Return None to
allow control to pass to the next event handler and ultimately
to allow the dialect to connect normally, given the updated
arguments.
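        A sketch of a listener that adjusts the connect parameters in place;
        the ``connect_timeout`` key is an illustrative, DBAPI-dependent
        name::

            @event.listens_for(Engine, "do_connect")
            def receive_do_connect(dialect, conn_rec, cargs, cparams):
                cparams["connect_timeout"] = 10
                # returning None allows the dialect to connect normally
                # with the modified cargs / cparams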
.. versionadded:: 1.0.3
"""
def do_executemany(self, cursor, statement, parameters, context):
"""Receive a cursor to have executemany() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute_no_params(self, cursor, statement, context):
"""Receive a cursor to have execute() with no parameters called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute(self, cursor, statement, parameters, context):
"""Receive a cursor to have execute() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
| gpl-2.0 |
rmboggs/django | tests/test_runner/test_debug_sql.py | 146 | 2971 | import sys
import unittest
from django.db import connection
from django.test import TestCase
from django.test.runner import DiscoverRunner
from django.utils import six
from django.utils.encoding import force_text
from .models import Person
@unittest.skipUnless(connection.vendor == 'sqlite', 'Only run on sqlite so we can check output SQL.')
class TestDebugSQL(unittest.TestCase):
class PassingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='pass').count()
class FailingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='fail').count()
self.fail()
class ErrorTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='error').count()
raise Exception
def _test_output(self, verbosity):
runner = DiscoverRunner(debug_sql=True, verbosity=0)
suite = runner.test_suite()
suite.addTest(self.FailingTest())
suite.addTest(self.ErrorTest())
suite.addTest(self.PassingTest())
old_config = runner.setup_databases()
stream = six.StringIO()
resultclass = runner.get_resultclass()
runner.test_runner(
verbosity=verbosity,
stream=stream,
resultclass=resultclass,
).run(suite)
runner.teardown_databases(old_config)
if six.PY2:
stream.buflist = [force_text(x) for x in stream.buflist]
return stream.getvalue()
def test_output_normal(self):
full_output = self._test_output(1)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertNotIn(output, full_output)
def test_output_verbose(self):
full_output = self._test_output(2)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertIn(output, full_output)
expected_outputs = [
('''SELECT COUNT(*) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = 'error';'''),
('''SELECT COUNT(*) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = 'fail';'''),
]
verbose_expected_outputs = [
# Output format changed in Python 3.5+
x.format('' if sys.version_info < (3, 5) else 'TestDebugSQL.') for x in [
'runTest (test_runner.test_debug_sql.{}FailingTest) ... FAIL',
'runTest (test_runner.test_debug_sql.{}ErrorTest) ... ERROR',
'runTest (test_runner.test_debug_sql.{}PassingTest) ... ok',
]
] + [
('''SELECT COUNT(*) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = 'pass';'''),
]
| bsd-3-clause |
aayushidwivedi01/spark-tk | python/sparktk/sparkconf.py | 11 | 14663 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sets up Spark Context"""
import os
import shutil
import atexit
from pyspark import SparkContext, SparkConf
from zip import zip_sparktk
from arguments import require_type
LIB_DIR="dependencies"
SPARK_ASSEMBLY_SEARCH="**/spark-assembly*.jar"
CORE_TARGET="sparktk-core/target"
import logging
logger = logging.getLogger('sparktk')
def get_source_code_target_dir():
"""gets the core/target folder as if this is running from source code"""
d = os.path.dirname
root = os.path.join(d(d(d(os.path.abspath(__file__)))))
target = os.path.join(root, CORE_TARGET)
return target
# default values -- DO NOT CHANGE freely, instead change the environ variables
default_spark_home = '/opt/cloudera/parcels/CDH/lib/spark'
default_sparktk_home = get_source_code_target_dir()
default_spark_master = 'local[4]'
default_spark_app_name = 'sparktk'
def set_env(name, value):
"""helper to set env w/ log"""
logger.info("sparktk.sparkconf making $%s=%s" % (name, value))
os.environ[name] = value
def get_jars_and_classpaths(dirs):
"""
Helper which creates a tuple of two strings for the given dirs:
1. jars string - a comma-separated list of all the .jar files in the given directories
    2. classpath string - a colon-separated list of all the given directories with a /* wildcard added
    :param dirs: a str or list of str specifying the directories to use for building the jar strings
:return: (jars, classpath)
"""
classpath = ':'.join(["%s/*" % d for d in dirs])
# list of tuples with the directory and jar file
dir_jar = [(d, f) for d in dirs for f in os.listdir(d) if f.endswith('.jar')]
# Get jar file list without any duplicate jars (use the one from the first directory it's found in). If
# we don't remove duplicates, we get warnings about the jar already having been registered.
distinct_jars = set()
jar_files = []
for dir, jar in dir_jar:
if jar not in distinct_jars:
jar_files.append(os.path.join(dir, jar))
distinct_jars.add(jar)
jars = ','.join(jar_files)
return jars, classpath
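# For illustration, given dirs=['/opt/a', '/opt/b'] containing x.jar and y.jar
# respectively (invented paths), the two returned strings would look like:
#
#     jars, classpath = get_jars_and_classpaths(['/opt/a', '/opt/b'])
#     # jars      -> '/opt/a/x.jar,/opt/b/y.jar'
#     # classpath -> '/opt/a/*:/opt/b/*'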
def get_spark_dirs():
try:
spark_home = os.environ['SPARK_HOME']
except KeyError:
raise RuntimeError("Missing value for environment variable SPARK_HOME.")
import glob2
spark_assembly_search = glob2.glob(os.path.join(spark_home,SPARK_ASSEMBLY_SEARCH))
if len(spark_assembly_search) > 0:
spark_assembly = os.path.dirname(spark_assembly_search[0])
else:
raise RuntimeError("Couldn't find spark assembly jar")
return [spark_assembly]
def get_sparktk_dirs():
"""returns the folders which contain all the jars required to run sparktk"""
# todo: revisit when packaging is resolved, right now this assumes source code/build folder structure
try:
sparktk_home = os.environ['SPARKTK_HOME']
except KeyError:
raise RuntimeError("Missing value for SPARKTK_HOME. Try setting $SPARKTK_HOME or the kwarg 'sparktk_home'")
dirs = [sparktk_home,
os.path.join(sparktk_home, LIB_DIR)] # the /dependencies folder
return dirs
def print_bash_cmds_for_sparktk_env():
"""prints export cmds for each env var set by set_env_for_sparktk, for use in a bash script"""
# see ../gopyspark.sh
for name in ['SPARK_HOME',
'SPARKTK_HOME',
'PYSPARK_PYTHON',
'PYSPARK_DRIVER_PYTHON',
'PYSPARK_SUBMIT_ARGS',
'SPARK_JAVA_OPTS',
]:
value = os.environ.get(name, None)
if value:
print "export %s='%s'" % (name, value) # require the single-quotes because of spaces in the values
def set_env_for_sparktk(spark_home=None,
sparktk_home=None,
pyspark_submit_args=None,
other_libs=None,
debug=None):
"""Set env vars necessary to start up a Spark Context with sparktk"""
if spark_home:
set_env('SPARK_HOME', spark_home)
elif 'SPARK_HOME' not in os.environ:
set_env('SPARK_HOME', default_spark_home)
if sparktk_home:
set_env('SPARKTK_HOME', sparktk_home)
elif 'SPARKTK_HOME' not in os.environ:
set_env('SPARKTK_HOME', default_sparktk_home)
if not os.environ.get('PYSPARK_DRIVER_PYTHON'):
set_env('PYSPARK_DRIVER_PYTHON', 'python2.7')
if not os.environ.get('PYSPARK_PYTHON'):
set_env('PYSPARK_PYTHON', 'python2.7')
# Validate other libraries to verify they have the required functions
other_libs = _validate_other_libs(other_libs)
# Everything else go in PYSPARK_SUBMIT_ARGS
spark_dirs = get_spark_dirs()
spark_dirs.extend(get_sparktk_dirs())
# Get library directories from other_libs
if other_libs is not None:
for other_lib in other_libs:
other_lib_dirs = other_lib.get_library_dirs()
spark_dirs.extend(other_lib_dirs)
jars, driver_class_path = get_jars_and_classpaths(spark_dirs)
if not pyspark_submit_args:
using_env = True
pyspark_submit_args = os.environ.get('PYSPARK_SUBMIT_ARGS', '')
else:
using_env = False
pieces = pyspark_submit_args.split()
if ('--jars' in pieces) ^ ('--driver-class-path' in pieces):
# Pyspark bug where --jars doesn't add to driver path https://github.com/apache/spark/pull/11687
# fix targeted for Spark 2.0, back-port to 1.6 unlikely
msg = "If setting --jars or --driver-class-path in pyspark_submit_args, both must be set (due to Spark): "
if using_env:
msg += "$PYSPARK_SUBMIT_ARGS=%s" % os.environ['PYSPARK_SUBMIT_ARGS']
else:
msg += "pyspark_submit_args=%s" % pyspark_submit_args
raise ValueError(msg)
jars_value_index = next((i for i, x in enumerate(pieces) if x == '--jars'), -1) + 1
if jars_value_index > 0:
pieces[jars_value_index] = ','.join([pieces[jars_value_index], jars])
driver_class_path_value_index = pieces.index('--driver-class-path') + 1
pieces[driver_class_path_value_index] = ':'.join([pieces[driver_class_path_value_index], driver_class_path])
else:
pieces = pieces + ['--jars', jars, '--driver-class-path', driver_class_path]
pyspark_submit_args = ' '.join(pieces)
set_env('PYSPARK_SUBMIT_ARGS', pyspark_submit_args)
if debug:
print "Adding args for remote java debugger"
address = debug if isinstance(debug, int) else 5005 # default
details = '-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=%s' % address
set_env('SPARK_JAVA_OPTS', details)
def create_sc(master=None,
py_files=None,
spark_home=None,
sparktk_home=None,
pyspark_submit_args=None,
app_name=None,
other_libs=None,
extra_conf_file=None,
extra_conf_dict=None,
use_local_fs=False,
debug=None):
"""
Creates a SparkContext with sparktk defaults
Many parameters can be overwritten
:param master: (str) spark master setting; for ex. 'local[4]' or 'yarn-client'
    :param py_files: (list) list of str of paths to python dependencies; Note that the current python
package will be freshly zipped up and put in a tmp folder for shipping by spark, and then removed
:param spark_home: (str) override $SPARK_HOME, the location of spark
:param sparktk_home: (str) override $SPARKTK_HOME, the location of spark-tk
:param pyspark_submit_args: (str) extra args passed to the pyspark submit
:param app_name: (str) name of spark app that will be created
:param other_libs: (list) other libraries (actual packages/modules) that are compatible with spark-tk,
which need to be added to the spark context. These libraries must be developed for usage with
spark-tk and have particular methods implemented. (See sparkconf.py _validate_other_libs)
:param extra_conf_file: (str) local file path to a spark conf file to supplement the spark conf
File format is basic key-value pairs per line, like:
spark.executor.memory=6g
spark.files.overwrite=true
(NOTE: if env var $SPARKTK_EXTRA_CONF is set, the file it indicates will be used.)
:param extra_conf_dict: (dict) dict for any extra spark conf settings,
for ex. {"spark.hadoop.fs.default.name": "file:///"}
these will override any matching settings from extra_conf_file, if provided
:param use_local_fs: (bool) simpler way to specify using local file system, rather than hdfs or other
    :param debug: (int or str) provide a port address to attach a debugger to the JVM that gets started
:return: pyspark SparkContext
"""
extra = {}
if extra_conf_file:
logger.info("create_sc() conf_file specified: %s" % extra_conf_file)
extra = _parse_spark_conf(extra_conf_file)
else:
env_extra_conf_file = os.getenv('SPARKTK_EXTRA_CONF', None)
if env_extra_conf_file:
logger.info("create_sc() using env SPARKTK_EXTRA_CONF for extra conf file: %s" % env_extra_conf_file)
extra = _parse_spark_conf(env_extra_conf_file)
if extra_conf_dict:
# extra_conf overrides settings in the conf_file
logger.info("create_sc() overriding conf with given extra_conf_dict")
extra.update(extra_conf_dict)
master_in_extra = 'spark.master' in extra
app_name_in_extra = 'spark.app.name' in extra
if 'spark.driver.memory' in extra:
pyspark_submit_args = "%s --driver-memory=%s" % (pyspark_submit_args or '', extra['spark.driver.memory'])
set_env_for_sparktk(spark_home, sparktk_home, pyspark_submit_args, other_libs, debug)
# bug/behavior of PYSPARK_SUBMIT_ARGS requires 'pyspark-shell' on the end --check in future spark versions
set_env('PYSPARK_SUBMIT_ARGS', ' '.join([os.environ['PYSPARK_SUBMIT_ARGS'], 'pyspark-shell']))
conf = SparkConf() # env must be set before creating SparkConf
for k, v in extra.items():
conf.set(k, v)
if not master and not master_in_extra:
master = default_spark_master
logger.info("create_sc() master not specified, setting to %s", master)
if master:
conf.setMaster(master)
if not app_name and not app_name_in_extra:
app_name = default_spark_app_name
logger.info("create_sc() app_name not specified, setting to %s", app_name)
if app_name:
conf.setAppName(app_name)
if use_local_fs:
conf.set("spark.hadoop.fs.default.name", "file:///")
if not py_files:
py_files = []
# zip up the relevant pieces of sparktk and put it in the py_files...
path = zip_sparktk()
tmp_dir = os.path.dirname(path)
logger.info("sparkconf created tmp dir for sparktk.zip %s" % tmp_dir)
atexit.register(shutil.rmtree, tmp_dir) # make python delete this folder when it shuts down
py_files.append(path)
msg = '\n'.join(["=" * 80,
"Creating SparkContext with the following SparkConf",
"pyFiles=%s" % str(py_files),
conf.toDebugString(),
"=" * 80])
logger.info(msg)
sc = SparkContext(conf=conf, pyFiles=py_files)
return sc
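# A minimal usage sketch for create_sc(); the master URL and the extra
# configuration values below are illustrative choices, not defaults of this
# module:
#
#     sc = create_sc(master='local[4]',
#                    extra_conf_dict={'spark.executor.memory': '2g'},
#                    use_local_fs=True)
#     rdd = sc.parallelize(range(10))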
def _parse_spark_conf(path):
"""
Parses the file found at the given path and returns a dict of spark conf.
All values in the dict will be strings, regardless of the presence of quotations in the file; double quotes are
stripped from values. The '#' marks the beginning of a comment, which will be ignored, whether as a line, or
the tail end of a line.
Parameters
----------
:param path: file path
:return: (dict) spark conf
Example
-------
Suppose Spark conf file 'my.conf':
spark.driver.cores=1
spark.driver.memory="1664m"
spark.executor.cores=2
Then,
>>> _parse_spark_conf('my.conf')
{'spark.driver.cores': '1', 'spark.driver.memory': '1664m', 'spark.executor.cores': '2'}
"""
conf = {}
with open(path, 'r') as r:
for line in r.readlines():
comment_start_index = line.find('#')
text = line if comment_start_index < 0 else line[:comment_start_index]
if text.strip():
try:
k, v = text.split('=', 1)
except ValueError:
raise RuntimeError("spark conf file %s has a bad line; may be missing an '=': %s" % (path, line))
conf[k.strip()] = v.strip().strip('"')
return conf
def _validate_other_libs(other_libs):
"""
    Validates the other_libs parameter. Makes it a list if it isn't already, and verifies that all the items in the
list are python modules with the required functions.
Raises a TypeError, if the other_libs parameter is not valid.
:param other_libs: parameter to validate
:return: validated other_libs parameter
"""
if other_libs is not None:
if not isinstance(other_libs, list):
other_libs = [other_libs]
import types
# todo: formalize and document the 'other_libs' for integration with spark-tk
required_functions = ["get_loaders","get_main_object","get_library_dirs"]
for lib in other_libs:
if not isinstance(lib, types.ModuleType):
raise TypeError("Expected other_libs to contain python modules, but received %s." % type(lib) )
for required_function in required_functions:
if not hasattr(lib, required_function):
raise TypeError("other_lib '%s' is missing %s() function." % (lib.__name__,required_function))
return other_libs
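# A sketch of a module shape that would pass this validation; the module name,
# function bodies and directory path below are placeholders:
#
#     # my_sparktk_extension.py
#     def get_loaders():
#         return []
#     def get_main_object(obj):
#         return None
#     def get_library_dirs():
#         return ["/opt/my_extension/dependencies"]
#
# It could then be passed as create_sc(other_libs=[my_sparktk_extension]).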
| apache-2.0 |
EmreAtes/spack | lib/spack/spack/modules/__init__.py | 5 | 1780 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""This package contains code for creating environment modules, which can
include dotkits, TCL non-hierarchical modules, LUA hierarchical modules, and
others.
"""
from __future__ import absolute_import
from .dotkit import DotkitModulefileWriter
from .tcl import TclModulefileWriter
from .lmod import LmodModulefileWriter
__all__ = [
'DotkitModulefileWriter',
'TclModulefileWriter',
'LmodModulefileWriter'
]
module_types = {
'dotkit': DotkitModulefileWriter,
'tcl': TclModulefileWriter,
'lmod': LmodModulefileWriter
}
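# A sketch of how this registry is typically consumed; the 'tcl' key is one of
# the keys defined above, and the constructor arguments shown are assumptions
# about the writer classes' signatures:
#
#     writer_cls = module_types['tcl']
#     writer = writer_cls(spec)  # actual signature depends on the writer class
#     writer.write()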
| lgpl-2.1 |
agoose77/hivesystem | bee/worker.py | 1 | 9348 | from __future__ import print_function
from .segments import reg_helpersegment, decoratorsegment
from .types import parse_parameters
from .beewrapper import beewrapper, reg_beehelper
import inspect
import libcontext
import functools
from . import emptyclass, mytype
from .event import exception
from .resolve import resolve
class workerframe(libcontext.subcontext):
__filtered_segmentnames__ = []
bee = None
guiparams = {}
def __init__(self, *args, **kargs):
self.args = args
self.kwargs = kargs
self.built = False
self._catch_targets = []
self._ev = libcontext.evsubcontext(libcontext.evexccontext, "evexc", lambda: self.context)
def build(self, beename):
self.bee_name = beename
args = [resolve(a) for a in self.args]
kwargs = {}
for key in self.kwargs:
kwargs[key] = resolve(self.kwargs[key])
try:
self.bee = self.bee(self.bee_name, *args, **kwargs)
self.bee.parent = self.parent
except TypeError as e:
raise TypeError(self.bee_name, *e.args)
libcontext.subcontext.__init__(self, beename, hive=False, import_parent_skip=["evexc"])
self.built = True
def add_catch_target(self, catch_target):
self._catch_targets.append(catch_target)
def catch(self, segmentname, exc_type, exc_value):
exc = exception((self.bee_name, segmentname), (exc_type, exc_value))
for catch_targets in self._catch_targets:
catch_targets(exc)
if exc.cleared:
break
return exc
def __place__(self):
from libcontext.socketclasses import socket_container
libcontext.socket(("evexc", "exception"), socket_container(self.add_catch_target))
self._ev.place()
self.bee.evexc = self._ev.context
self.bee.catchfunc = self.catch
self.bee.__place__()
def place(self):
libcontext.subcontext.place(self)
@staticmethod
def __get_beename__(self_or_class):
from . import BuildError
if self_or_class.built is not True:
raise BuildError
return self_or_class.beename
def __getattr__(self, attr):
if attr == "set_parent":
def set_parent(p):
self.parent = p
return set_parent
return (self, attr)
class runtime_worker(object):
_runtime_segment_classes = []
parameters = []
def __init__(self, beename, *args, **kargs):
self._beename = beename
self._runtime_segments = []
self.__variabledict__ = {}
self.catchfunc = None
params = parse_parameters([], self.parameters, args, kargs)[1]
for p in params:
segment = [c for c in self._runtime_segment_classes if c.segmentname == p][0]
segment.startvalue = params[p]
for segmentclass in self._runtime_segment_classes:
self._runtime_segments.append(segmentclass(self, beename))
def place(self):
pass
def __place__(self):
import libcontext
self._context = libcontext.get_curr_context()
self.place()
for segment in self._runtime_segments:
segment.place()
if self.catchfunc is not None:
for segment in self._runtime_segments:
segment.set_catchfunc(functools.partial(self.catchfunc, segment.segmentname))
class worker:
pass # placeholder; will be redefined later
class workerbuilder(reg_beehelper):
__workerframeclass__ = workerframe
__workerwrapperclass__ = beewrapper
__runtime_workerclass__ = runtime_worker
def __init__(self, name, bases, dic, *args, **kwargs):
mytype.__init__(self, name, bases, dic)
    def __new__(metacls, name, bases, cls_dict, **kargs):
forbidden = ("__init__", "__place__", "_runtime_segment_classes", "_beename", "_runtime_segments",
"__variabledict__")
for forbidden_name in forbidden:
if forbidden_name in cls_dict:
raise AssertionError(forbidden_name)
if "__beename__" not in cls_dict:
            cls_dict["__beename__"] = name
gui_parameters = cls_dict.get("guiparams", None)
worker_bases = []
listed_base_classes = bases
bases = []
for cls in listed_base_classes:
if not (getattr(cls, "_wrapped_hive", None) is None or isinstance(cls._wrapped_hive, tuple)):
bases.append(cls._wrapped_hive)
worker_bases.append(cls._wrapped_hive.bee)
bases += cls._wrapped_hive.__mro__[1:]
else:
bases.append(cls)
bases = tuple(bases)
inherited_cls_dict = {}
contains_segments = False
for cls in bases:
if not(isinstance(cls, worker) or hasattr(cls, "__beedic__")):
continue
inherited_cls_dict.update(cls.__beedic__)
for attribute in cls.__beedic__.values():
if not hasattr(attribute, "__dict__"):
continue
for attribute_name in attribute.__dict__:
if attribute_name.startswith("_connection"):
contains_segments = True
break
inherited_cls_dict.update(cls_dict)
cls_dict = inherited_cls_dict
if emptyclass not in listed_base_classes and contains_segments:
raise TypeError("Class definition of worker '%s': bee.worker with segments cannot be subclassed"
                            % name)
args = []
caller_id = id(inspect.currentframe().f_back)
if caller_id in reg_helpersegment.reg:
args += [a for a in reg_helpersegment.reg[caller_id] if a not in cls_dict.values()]
del reg_helpersegment.reg[caller_id]
if emptyclass in bases:
cls_dict["__helpers__"] = args
bases = tuple([b for b in bases if b != emptyclass])
            return type.__new__(metacls, name, bases, dict(cls_dict))
segments = [(i + 1, a) for i, a in enumerate(args)] + list(cls_dict.items())
guiparams = {"__beename__": cls_dict["__beename__"], "__ev__": ["evexc"]}
if gui_parameters:
guiparams["guiparams"] = gui_parameters
parameters = []
runtime_segment_classes = []
moduledict = {"_runtime_segment_classes": runtime_segment_classes, "parameters": parameters}
for segment_name, segment in segments:
if segment_name in metacls.__workerframeclass__.__filtered_segmentnames__:
continue
if hasattr(segment, "bind") and hasattr(segment.bind, "__call__"):
                segment.bind(name, cls_dict)
if hasattr(segment, "connect") and hasattr(segment.connect, "__call__"):
segment.connect(segment_name)
for segment_name, segment in segments:
if segment_name in metacls.__workerframeclass__.__filtered_segmentnames__:
continue
if hasattr(segment, "build") and hasattr(segment.build, "__call__"):
runtime_segment_classes.append(segment.build(segment_name))
for segment_name, segment in segments:
processed = False
if segment_name not in metacls.__workerframeclass__.__filtered_segmentnames__:
if hasattr(segment, "connect"):
processed = True
if hasattr(segment, "build"):
processed = True
if hasattr(segment, "guiparams") and hasattr(segment.guiparams, "__call__"):
segment.guiparams(segment_name, guiparams)
processed = True
if hasattr(segment, "parameters") and hasattr(segment.parameters, "__call__"):
parameters.append(segment.parameters(segment_name))
if not processed and not isinstance(segment_name, int):
# "segment" is actually a method or property of the class...
moduledict[segment_name] = segment
elif isinstance(segment, decoratorsegment):
moduledict[segment_name] = segment.decorated
rnc = metacls.__runtime_workerclass__
        rworker = mytype("runtime_worker:" + name, tuple(worker_bases) + (runtime_worker,), moduledict)
nbc = metacls.__workerframeclass__
        rworkerframe = type(nbc.__name__ + ":" + name, (nbc,), {"bee": rworker, "guiparams": guiparams,
"__beedic__": dict(cls_dict),
"__helpers__": args})
topdict = dict(moduledict)
topdict.update({"_wrapped_hive": rworkerframe, "guiparams": guiparams})
for forbidden_name in forbidden:
if forbidden_name in topdict:
del topdict[forbidden_name]
topdict["__metaclass__"] = workerbuilder
        ret = type.__new__(metacls, name + "&", (metacls.__workerwrapperclass__,), topdict)
rworker.__workerclass__ = ret
return ret
class worker(emptyclass):
__metaclass__ = workerbuilder
| bsd-2-clause |
tjsavage/rototutor_djangononrel | django/contrib/admindocs/views.py | 296 | 15504 | from django import template, templatetags
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
import inspect, os, re
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
domain = 'example.com'
name = 'my site'
def get_root_path():
try:
return urlresolvers.reverse('admin:index')
except urlresolvers.NoReverseMatch:
from django.contrib import admin
try:
return urlresolvers.reverse(admin.site.root, args=[''])
except urlresolvers.NoReverseMatch:
return getattr(settings, "ADMIN_SITE_ROOT_URL", "/admin/")
def doc_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
return render_to_response('admin_doc/index.html', {
'root_path': get_root_path(),
}, context_instance=RequestContext(request))
doc_index = staff_member_required(doc_index)
def bookmarklets(request):
admin_root = get_root_path()
return render_to_response('admin_doc/bookmarklets.html', {
'root_path': admin_root,
'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)),
}, context_instance=RequestContext(request))
bookmarklets = staff_member_required(bookmarklets)
def template_tag_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
tags = []
app_libs = template.libraries.items()
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_tag_index.html', {
'root_path': get_root_path(),
'tags': tags
}, context_instance=RequestContext(request))
template_tag_index = staff_member_required(template_tag_index)
def template_filter_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
filters = []
app_libs = template.libraries.items()
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_filter_index.html', {
'root_path': get_root_path(),
'filters': filters
}, context_instance=RequestContext(request))
template_filter_index = staff_member_required(template_filter_index)
def view_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
if settings.ADMIN_FOR:
settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
views = []
for settings_mod in settings_modules:
urlconf = import_module(settings_mod.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for (func, regex) in view_functions:
views.append({
'name': getattr(func, '__name__', func.__class__.__name__),
'module': func.__module__,
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'url': simplify_regex(regex),
})
return render_to_response('admin_doc/view_index.html', {
'root_path': get_root_path(),
'views': views
}, context_instance=RequestContext(request))
view_index = staff_member_required(view_index)
def view_detail(request, view):
if not utils.docutils_is_available:
return missing_docutils_page(request)
mod, func = urlresolvers.get_mod_func(view)
try:
view_func = getattr(import_module(mod), func)
except (ImportError, AttributeError):
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
return render_to_response('admin_doc/view_detail.html', {
'root_path': get_root_path(),
'name': view,
'summary': title,
'body': body,
'meta': metadata,
}, context_instance=RequestContext(request))
view_detail = staff_member_required(view_detail)
def model_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
m_list = [m._meta for m in models.get_models()]
return render_to_response('admin_doc/model_index.html', {
'root_path': get_root_path(),
'models': m_list
}, context_instance=RequestContext(request))
model_index = staff_member_required(model_index)
def model_detail(request, app_label, model_name):
if not utils.docutils_is_available:
return missing_docutils_page(request)
# Get the model class.
try:
app_mod = models.get_app(app_label)
except ImproperlyConfigured:
raise Http404(_("App %r not found") % app_label)
model = None
for m in models.get_models(app_mod):
if m._meta.object_name.lower() == model_name:
model = m
break
if model is None:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})
opts = model._meta
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = related_object_name = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = related_object_name = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
})
fields.append({
'name' : "%s.count" % field.name,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
accessor = rel.get_accessor_name()
fields.append({
'name' : "%s.all" % accessor,
'data_type' : 'List',
'verbose' : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
})
fields.append({
'name' : "%s.count" % accessor,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
})
return render_to_response('admin_doc/model_detail.html', {
'root_path': get_root_path(),
'name': '%s.%s' % (opts.app_label, opts.object_name),
'summary': _("Fields on %s objects") % opts.object_name,
'description': model.__doc__,
'fields': fields,
}, context_instance=RequestContext(request))
model_detail = staff_member_required(model_detail)
def template_detail(request, template):
templates = []
for site_settings_module in settings.ADMIN_FOR:
settings_mod = import_module(site_settings_module)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for dir in settings_mod.TEMPLATE_DIRS:
template_file = os.path.join(dir, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
                'contents': lambda path=template_file: os.path.exists(path) and open(path).read() or '',
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
})
return render_to_response('admin_doc/template_detail.html', {
'root_path': get_root_path(),
'name': template,
'templates': templates,
}, context_instance=RequestContext(request))
template_detail = staff_member_required(template_detail)
####################
# Helper functions #
####################
def missing_docutils_page(request):
"""Display an error message for people without docutils"""
return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for module_name in template.get_templatetags_modules():
mod = import_module(module_name)
libraries = [
os.path.splitext(p)[0]
for p in os.listdir(os.path.dirname(mod.__file__))
if p.endswith('.py') and p[0].isalpha()
]
for library_name in libraries:
try:
lib = template.get_library(library_name)
except template.InvalidTemplateLibrary, e:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if hasattr(p, '_get_callback'):
try:
views.append((p._get_callback(), base + p.regex.pattern))
except ViewDoesNotExist:
continue
elif hasattr(p, '_get_url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
| bsd-3-clause |
rproepp/spykeutils | spykeutils/tests/test_tools.py | 1 | 18764 | try:
import unittest2 as ut
assert ut # Suppress pyflakes warning about redefinition of unused ut
except ImportError:
import unittest as ut
from builders import arange_spikes
from numpy.testing import assert_array_equal, assert_array_almost_equal
from spykeutils import tools
import neo
import neo.io.tools
import neo.test.tools
import quantities as pq
import scipy as sp
class TestApplyToDict(ut.TestCase):
@staticmethod
def fn(train, multiplier=1):
return multiplier * train.size
def test_maps_function_to_each_spike_train(self):
st_dict = {'a': [arange_spikes(5 * pq.s), arange_spikes(4 * pq.s)],
'b': [arange_spikes(7 * pq.s)]}
expected = {'a': [4, 3], 'b': [6]}
actual = tools.apply_to_dict(self.fn, st_dict)
self.assertEqual(expected, actual)
def test_works_on_empty_lists(self):
st_dict = {'a': [], 'b': []}
expected = {'a': [], 'b': []}
actual = tools.apply_to_dict(self.fn, st_dict)
self.assertEqual(expected, actual)
def test_works_on_empty_dict(self):
st_dict = {}
expected = {}
actual = tools.apply_to_dict(self.fn, st_dict)
self.assertEqual(expected, actual)
def test_allows_to_pass_additional_args(self):
st_dict = {'a': [arange_spikes(5 * pq.s), arange_spikes(4 * pq.s)],
'b': [arange_spikes(7 * pq.s)]}
expected = {'a': [8, 6], 'b': [12]}
actual = tools.apply_to_dict(self.fn, st_dict, 2)
self.assertEqual(expected, actual)
class TestBinSpikeTrains(ut.TestCase):
def test_bins_spike_train_using_its_properties(self):
a = neo.SpikeTrain(
sp.array([1000.0]) * pq.ms, t_start=500.0 * pq.ms,
t_stop=1500.0 * pq.ms)
sampling_rate = 4.0 * pq.Hz
expected = {0: [sp.array([0, 0, 1, 0])]}
expectedBins = sp.array([0.5, 0.75, 1.0, 1.25, 1.5]) * pq.s
actual, actualBins = tools.bin_spike_trains({0: [a]}, sampling_rate)
self.assertEqual(len(expected), len(actual))
self.assertEqual(len(expected[0]), len(actual[0]))
assert_array_equal(expected[0][0], actual[0][0])
assert_array_almost_equal(
expectedBins, actualBins.rescale(expectedBins.units))
def test_bins_spike_train_using_passed_properties(self):
a = neo.SpikeTrain(
sp.array([1.0]) * pq.s, t_start=0.0 * pq.s, t_stop=5.0 * pq.s)
sampling_rate = 4.0 * pq.Hz
t_start = 0.5 * pq.s
t_stop = 1.5 * pq.s
expected = {0: [sp.array([0, 0, 1, 0])]}
expectedBins = sp.array([0.5, 0.75, 1.0, 1.25, 1.5]) * pq.s
actual, actualBins = tools.bin_spike_trains(
{0: [a]}, sampling_rate=sampling_rate, t_start=t_start,
t_stop=t_stop)
self.assertEqual(len(expected), len(actual))
self.assertEqual(len(expected[0]), len(actual[0]))
assert_array_equal(expected[0][0], actual[0][0])
assert_array_almost_equal(
expectedBins, actualBins.rescale(expectedBins.units))
def test_uses_max_spike_train_interval(self):
a = arange_spikes(5 * pq.s)
b = arange_spikes(7 * pq.s, 15 * pq.s)
sampling_rate = 4.0 * pq.Hz
expectedBins = sp.arange(0.0, 15.1, 0.25) * pq.s
actual, actualBins = tools.bin_spike_trains(
{0: [a, b]}, sampling_rate=sampling_rate)
assert_array_almost_equal(
expectedBins, actualBins.rescale(expectedBins.units))
def test_handles_bin_size_which_is_not_divisor_of_duration(self):
a = arange_spikes(5 * pq.s)
sampling_rate = 1.0 / 1.3 * pq.Hz
expected = {0: [sp.array([1, 1, 1, 1])]}
expectedBins = sp.array([0.0, 1.3, 2.6, 3.9, 5.2]) * pq.s
actual, actualBins = tools.bin_spike_trains({0: [a]}, sampling_rate)
self.assertEqual(len(expected), len(actual))
self.assertEqual(len(expected[0]), len(actual[0]))
assert_array_equal(expected[0][0], actual[0][0])
assert_array_almost_equal(
expectedBins, actualBins.rescale(expectedBins.units))
class TestConcatenateSpikeTrains(ut.TestCase):
def test_concatenates_spike_trains(self):
a = arange_spikes(3.0 * pq.s)
b = arange_spikes(2.0 * pq.s, 5.0 * pq.s)
expected = arange_spikes(5.0 * pq.s)
actual = tools.concatenate_spike_trains((a, b))
assert_array_almost_equal(expected, actual)
def test_t_start_is_min_of_all_trains(self):
a = arange_spikes(3.0 * pq.s, 5.0 * pq.s)
b = arange_spikes(1.0 * pq.s, 6.0 * pq.s)
expected = 1.0 * pq.s
actual = tools.concatenate_spike_trains((a, b)).t_start
self.assertAlmostEqual(expected, actual)
def test_t_stop_is_max_of_all_trains(self):
a = arange_spikes(3.0 * pq.s, 5.0 * pq.s)
b = arange_spikes(1.0 * pq.s, 6.0 * pq.s)
expected = 6.0 * pq.s
actual = tools.concatenate_spike_trains((a, b)).t_stop
self.assertAlmostEqual(expected, actual)
class TestRemoveFromHierarchy(ut.TestCase):
SEGMENTS = 5
CHANNEL_GROUPS = 4
UNITS = 3
CHANNELS = 4
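    # Builds a Block with SEGMENTS segments and CHANNEL_GROUPS recording channel
    # groups of UNITS units each; every unit gets one Spike and one SpikeTrain
    # per segment. With many_to_many=True, half of each group's recording
    # channels are shared across all channel groups instead of being unique.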
@classmethod
def create_hierarchy(cls, many_to_many):
b = neo.Block()
for ns in range(cls.SEGMENTS):
b.segments.append(neo.Segment())
channels = []
if many_to_many:
channels = [neo.RecordingChannel(name='Shared %d' % i,
index=i + cls.CHANNELS)
for i in range(cls.CHANNELS / 2)]
for ng in range(cls.CHANNEL_GROUPS):
rcg = neo.RecordingChannelGroup()
for nu in range(cls.UNITS):
unit = neo.Unit()
for ns in range(cls.SEGMENTS):
spike = neo.Spike(0 * pq.s)
unit.spikes.append(spike)
b.segments[ns].spikes.append(spike)
st = neo.SpikeTrain([] * pq.s, 0 * pq.s)
unit.spiketrains.append(st)
b.segments[ns].spiketrains.append(st)
rcg.units.append(unit)
if not many_to_many:
for nc in range(cls.CHANNELS):
rc = neo.RecordingChannel(
name='Single %d' % nc, index=nc)
rc.recordingchannelgroups.append(rcg)
rcg.recordingchannels.append(rc)
else:
for nc in range(cls.CHANNELS):
if nc % 2 == 0:
rc = neo.RecordingChannel(
name='Single %d' % (nc / 2), index=nc / 2)
else:
rc = channels[nc / 2]
rc.recordingchannelgroups.append(rcg)
rcg.recordingchannels.append(rc)
rcg.channel_indexes = sp.array(
[c.index for c in rcg.recordingchannels])
rcg.channel_names = sp.array(
[c.name for c in rcg.recordingchannels])
b.recordingchannelgroups.append(rcg)
try:
neo.io.tools.create_many_to_one_relationship(b)
except AttributeError:
b.create_many_to_one_relationship()
return b
def test_remove_block(self):
block = self.create_hierarchy(False)
comp = self.create_hierarchy(False)
tools.remove_from_hierarchy(block)
neo.test.tools.assert_same_sub_schema(block, comp)
def test_remove_segment_no_orphans(self):
block = self.create_hierarchy(False)
comp = self.create_hierarchy(False)
seg = block.segments[1]
tools.remove_from_hierarchy(seg)
self.assertFalse(seg in block.segments)
self.assertEqual(len(block.list_units),
self.UNITS * self.CHANNEL_GROUPS)
for u in block.list_units:
self.assertEqual(len(u.spikes), self.SEGMENTS - 1)
self.assertEqual(len(u.spiketrains), self.SEGMENTS - 1)
neo.test.tools.assert_same_sub_schema(seg, comp.segments[1])
def test_remove_segment_keep_orphans(self):
block = self.create_hierarchy(False)
comp = self.create_hierarchy(False)
seg = block.segments[1]
tools.remove_from_hierarchy(seg, False)
self.assertFalse(seg in block.segments)
self.assertEqual(len(block.list_units),
self.UNITS * self.CHANNEL_GROUPS)
for u in block.list_units:
self.assertEqual(len(u.spikes), self.SEGMENTS)
self.assertEqual(len(u.spiketrains), self.SEGMENTS)
neo.test.tools.assert_same_sub_schema(seg, comp.segments[1])
def test_remove_channel_group_no_orphans(self):
block = self.create_hierarchy(False)
comp = self.create_hierarchy(False)
rcg = block.recordingchannelgroups[1]
tools.remove_from_hierarchy(rcg)
self.assertFalse(rcg in block.recordingchannelgroups)
self.assertEqual(len(block.segments), self.SEGMENTS)
for s in block.segments:
self.assertEqual(len(s.spikes),
self.UNITS * (self.CHANNEL_GROUPS - 1))
self.assertEqual(len(s.spiketrains),
self.UNITS * (self.CHANNEL_GROUPS - 1))
neo.test.tools.assert_same_sub_schema(rcg,
comp.recordingchannelgroups[1])
def test_remove_channel_group_keep_orphans(self):
block = self.create_hierarchy(False)
comp = self.create_hierarchy(False)
rcg = block.recordingchannelgroups[1]
tools.remove_from_hierarchy(rcg, False)
self.assertFalse(rcg in block.recordingchannelgroups)
self.assertEqual(len(block.segments), self.SEGMENTS)
for s in block.segments:
self.assertEqual(len(s.spikes),
self.UNITS * self.CHANNEL_GROUPS)
self.assertEqual(len(s.spiketrains),
self.UNITS * self.CHANNEL_GROUPS)
neo.test.tools.assert_same_sub_schema(rcg,
comp.recordingchannelgroups[1])
def test_remove_channel(self):
block = self.create_hierarchy(False)
comp = self.create_hierarchy(False)
rc = block.list_recordingchannels[5]
tools.remove_from_hierarchy(rc)
self.assertFalse(rc in block.list_recordingchannels)
neo.test.tools.assert_same_sub_schema(rc,
comp.list_recordingchannels[5])
self.assertEqual(len(block.segments), self.SEGMENTS)
self.assertEqual(len(block.recordingchannelgroups),
self.CHANNEL_GROUPS)
self.assertEqual(len(block.list_recordingchannels),
self.CHANNEL_GROUPS * self.CHANNELS - 1)
# Should be removed from its own channel group
rcg = rc.recordingchannelgroups[0]
self.assertEqual(len(rcg.recordingchannels), self.CHANNELS - 1)
self.assertEqual(rcg.channel_indexes.shape[0], self.CHANNELS - 1)
self.assertEqual(rcg.channel_names.shape[0], self.CHANNELS - 1)
self.assertFalse(rc.index in rcg.channel_indexes)
self.assertFalse(rc.name in rcg.channel_names)
def test_remove_unique_channel_many_to_many(self):
block = self.create_hierarchy(True)
comp = self.create_hierarchy(True)
self.assertEqual(
len(block.list_recordingchannels),
self.CHANNEL_GROUPS * (self.CHANNELS / 2) + (self.CHANNELS / 2))
rc = block.list_recordingchannels[0] # Unique channel
tools.remove_from_hierarchy(rc)
neo.test.tools.assert_same_sub_schema(rc,
comp.list_recordingchannels[0])
self.assertFalse(rc in block.list_recordingchannels)
self.assertEqual(len(block.segments), self.SEGMENTS)
self.assertEqual(len(block.recordingchannelgroups),
self.CHANNEL_GROUPS)
self.assertEqual(
len(block.list_recordingchannels),
self.CHANNEL_GROUPS * (self.CHANNELS / 2) + (self.CHANNELS / 2) - 1)
# Should be removed from its own channel group
rcg = rc.recordingchannelgroups[0]
self.assertEqual(len(rcg.recordingchannels), self.CHANNELS - 1)
self.assertEqual(rcg.channel_indexes.shape[0], self.CHANNELS - 1)
self.assertEqual(rcg.channel_names.shape[0], self.CHANNELS - 1)
self.assertFalse(rc.index in rcg.channel_indexes)
self.assertFalse(rc.name in rcg.channel_names)
def test_remove_shared_channel_many_to_many(self):
block = self.create_hierarchy(True)
comp = self.create_hierarchy(True)
self.assertEqual(
len(block.list_recordingchannels),
self.CHANNEL_GROUPS * (self.CHANNELS / 2) + (self.CHANNELS / 2))
rc = block.list_recordingchannels[1] # Shared channel
tools.remove_from_hierarchy(rc)
neo.test.tools.assert_same_sub_schema(rc,
comp.list_recordingchannels[1])
self.assertFalse(rc in block.list_recordingchannels)
self.assertEqual(len(block.segments), self.SEGMENTS)
self.assertEqual(len(block.recordingchannelgroups),
self.CHANNEL_GROUPS)
self.assertEqual(
len(block.list_recordingchannels),
self.CHANNEL_GROUPS * (self.CHANNELS / 2) + (self.CHANNELS / 2) - 1)
# Should be removed from all channel groups
for rcg in block.recordingchannelgroups:
self.assertEqual(len(rcg.recordingchannels), self.CHANNELS - 1)
self.assertEqual(rcg.channel_indexes.shape[0], self.CHANNELS - 1)
self.assertEqual(rcg.channel_names.shape[0], self.CHANNELS - 1)
self.assertFalse(rc.index in rcg.channel_indexes)
self.assertFalse(rc.name in rcg.channel_names)
def test_remove_unit_no_orphans(self):
block = self.create_hierarchy(False)
comp = self.create_hierarchy(False)
unit = block.list_units[5]
tools.remove_from_hierarchy(unit)
self.assertFalse(unit in block.list_units)
self.assertEqual(len(block.list_units),
self.UNITS * self.CHANNEL_GROUPS - 1)
self.assertEqual(len(block.segments), self.SEGMENTS)
self.assertEqual(len(block.recordingchannelgroups),
self.CHANNEL_GROUPS)
for seg in block.segments:
self.assertEqual(len(seg.spikes),
self.UNITS * self.CHANNEL_GROUPS - 1)
self.assertEqual(len(seg.spiketrains),
self.UNITS * self.CHANNEL_GROUPS - 1)
self.assertFalse(unit in [s.unit for s in seg.spikes])
self.assertFalse(unit in [st.unit for st in seg.spiketrains])
neo.test.tools.assert_same_sub_schema(unit, comp.list_units[5])
def test_remove_unit_keep_orphans(self):
block = self.create_hierarchy(False)
comp = self.create_hierarchy(False)
unit = block.list_units[5]
tools.remove_from_hierarchy(unit, False)
self.assertFalse(unit in block.list_units)
self.assertEqual(len(block.list_units),
self.UNITS * self.CHANNEL_GROUPS - 1)
self.assertEqual(len(block.segments), self.SEGMENTS)
self.assertEqual(len(block.recordingchannelgroups),
self.CHANNEL_GROUPS)
for seg in block.segments:
self.assertEqual(len(seg.spikes),
self.UNITS * self.CHANNEL_GROUPS)
self.assertEqual(len(seg.spiketrains),
self.UNITS * self.CHANNEL_GROUPS)
self.assertFalse(unit in [s.unit for s in seg.spikes])
self.assertFalse(unit in [st.unit for st in seg.spiketrains])
neo.test.tools.assert_same_sub_schema(unit, comp.list_units[5])
def test_remove_spike(self):
unit = neo.Unit()
segment = neo.Segment()
s = neo.Spike(0 * pq.s)
unit.spikes.append(s)
segment.spikes.append(s)
s.unit = unit
s.segment = segment
st = neo.SpikeTrain([] * pq.s, 0 * pq.s)
unit.spiketrains.append(st)
segment.spiketrains.append(st)
st.unit = unit
st.segment = segment
tools.remove_from_hierarchy(s)
self.assertTrue(st in unit.spiketrains)
self.assertTrue(st in segment.spiketrains)
self.assertFalse(s in unit.spikes)
self.assertFalse(s in segment.spikes)
def test_remove_spiketrain(self):
unit = neo.Unit()
segment = neo.Segment()
s = neo.Spike(0 * pq.s)
unit.spikes.append(s)
segment.spikes.append(s)
s.unit = unit
s.segment = segment
st = neo.SpikeTrain([] * pq.s, 0 * pq.s)
unit.spiketrains.append(st)
segment.spiketrains.append(st)
st.unit = unit
st.segment = segment
tools.remove_from_hierarchy(st)
self.assertTrue(s in unit.spikes)
self.assertTrue(s in segment.spikes)
self.assertFalse(st in unit.spiketrains)
self.assertFalse(st in segment.spiketrains)
def test_extract_spikes(self):
s1 = sp.zeros(10000)
s2 = sp.ones(10000)
t = sp.arange(0.0, 10.1, 1.0)
sig1 = neo.AnalogSignal(s1 * pq.uV, sampling_rate=pq.kHz)
sig2 = neo.AnalogSignal(s2 * pq.uV, sampling_rate=pq.kHz)
train = neo.SpikeTrain(t * pq.s, 10 * pq.s)
spikes = tools.extract_spikes(
train, [sig1, sig2], 100 * pq.ms, 10 * pq.ms)
self.assertEqual(len(spikes), 9)
for s in spikes:
self.assertAlmostEqual(s.waveform[:, 0].mean(), 0.0)
self.assertAlmostEqual(s.waveform[:, 1].mean(), 1.0)
def test_extract_different_spikes(self):
s1 = sp.ones(10500)
s2 = -sp.ones(10500)
for i in xrange(10):
s1[i * 1000 + 500:i * 1000 + 1500] *= i
s2[i * 1000 + 500:i * 1000 + 1500] *= i
t = sp.arange(0.0, 10.1, 1.0)
sig1 = neo.AnalogSignal(s1 * pq.uV, sampling_rate=pq.kHz)
sig2 = neo.AnalogSignal(s2 * pq.uV, sampling_rate=pq.kHz)
train = neo.SpikeTrain(t * pq.s, 10 * pq.s)
spikes = tools.extract_spikes(
train, [sig1, sig2], 100 * pq.ms, 10 * pq.ms)
self.assertEqual(len(spikes), 10)
for i, s in enumerate(spikes):
self.assertAlmostEqual(s.waveform[:, 0].mean(), i)
self.assertAlmostEqual(s.waveform[:, 1].mean(), -i)
if __name__ == '__main__':
ut.main()
| bsd-3-clause |
museomix/2013_Quebec_thermoscope | raspberry/pygame-1.9.1release/lib/_camera_opencv_highgui.py | 13 | 2229 |
import pygame
import numpy
import opencv
#this is important for capturing/displaying images
from opencv import highgui
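# Minimal pygame.camera backend built on OpenCV's legacy highgui capture API
# (see the monkey-patching example at the bottom of this module).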
def list_cameras():
"""
"""
# -1 for opencv means get any of them.
return [-1]
def init():
pass
def quit():
pass
class Camera:
    def __init__(self, device=0, size=(640, 480), mode="RGB"):
"""
"""
self.camera = highgui.cvCreateCameraCapture(device)
if not self.camera:
            raise ValueError("Could not open camera. Sorry.")
def set_controls(self, **kwargs):
"""
"""
def set_resolution(self, width, height):
"""Sets the capture resolution. (without dialog)
"""
# nothing to do here.
pass
def query_image(self):
return True
def stop(self):
pass
def start(self):
# do nothing here... since the camera is already open.
pass
def get_buffer(self):
"""Returns a string containing the raw pixel data.
"""
return self.get_surface().get_buffer()
def get_image(self, dest_surf = None):
return self.get_surface(dest_surf)
def get_surface(self, dest_surf = None):
camera = self.camera
im = highgui.cvQueryFrame(camera)
#convert Ipl image to PIL image
#print type(im)
if im:
xx = opencv.adaptors.Ipl2NumPy(im)
#print type(xx)
#print xx.iscontiguous()
#print dir(xx)
#print xx.shape
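            # Flatten to a 1-D array: pygame.image.frombuffer expects a
            # contiguous buffer of raw RGB bytes.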
xxx = numpy.reshape(xx, (numpy.product(xx.shape),))
if xx.shape[2] != 3:
raise ValueError("not sure what to do about this size")
pg_img = pygame.image.frombuffer(xxx, (xx.shape[1],xx.shape[0]), "RGB")
# if there is a destination surface given, we blit onto that.
if dest_surf:
dest_surf.blit(pg_img, (0,0))
return dest_surf
#return pg_img
if __name__ == "__main__":
# try and use this camera stuff with the pygame camera example.
import pygame.examples.camera
pygame.camera.Camera = Camera
pygame.camera.list_cameras = list_cameras
pygame.examples.camera.main()
| mit |
cortedeltimo/SickRage | lib/libfuturize/fixes/fix_UserDict.py | 51 | 3845 | """Fix UserDict.
Incomplete!
TODO: base this on fix_urllib perhaps?
"""
# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, attr_chain
from lib2to3.fixes.fix_imports import alternates, build_pattern, FixImports
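# In Python 3 the UserDict class lives in the collections module, so imports
# of the old top-level UserDict module are rewritten to collections.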
MAPPING = {'UserDict': 'collections',
}
# def alternates(members):
# return "(" + "|".join(map(repr, members)) + ")"
#
#
# def build_pattern(mapping=MAPPING):
# mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
# bare_names = alternates(mapping.keys())
#
# yield """name_import=import_name< 'import' ((%s) |
# multiple_imports=dotted_as_names< any* (%s) any* >) >
# """ % (mod_list, mod_list)
# yield """import_from< 'from' (%s) 'import' ['(']
# ( any | import_as_name< any 'as' any > |
# import_as_names< any* >) [')'] >
# """ % mod_list
# yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
# multiple_imports=dotted_as_names<
# any* dotted_as_name< (%s) 'as' any > any* >) >
# """ % (mod_list, mod_list)
#
# # Find usages of module members in code e.g. thread.foo(bar)
# yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
# class FixUserDict(fixer_base.BaseFix):
class FixUserdict(FixImports):
BM_compatible = True
keep_line_order = True
# This is overridden in fix_imports2.
mapping = MAPPING
# We want to run this fixer late, so fix_import doesn't try to make stdlib
# renames into relative imports.
run_order = 6
def build_pattern(self):
return "|".join(build_pattern(self.mapping))
def compile_pattern(self):
        # We override this, so MAPPING can be programmatically altered and the
# changes will be reflected in PATTERN.
self.PATTERN = self.build_pattern()
super(FixImports, self).compile_pattern()
# Don't match the node if it's within another match.
def match(self, node):
match = super(FixImports, self).match
results = match(node)
if results:
# Module usage could be in the trailer of an attribute lookup, so we
# might have nested matches when "bare_with_attr" is present.
if "bare_with_attr" not in results and \
any(match(obj) for obj in attr_chain(node, "parent")):
return False
return results
return False
def start_tree(self, tree, filename):
super(FixImports, self).start_tree(tree, filename)
self.replace = {}
def transform(self, node, results):
import_mod = results.get("module_name")
if import_mod:
mod_name = import_mod.value
new_name = unicode(self.mapping[mod_name])
import_mod.replace(Name(new_name, prefix=import_mod.prefix))
if "name_import" in results:
# If it's not a "from x import x, y" or "import x as y" import,
                # mark its usage to be replaced.
self.replace[mod_name] = new_name
if "multiple_imports" in results:
# This is a nasty hack to fix multiple imports on a line (e.g.,
# "import StringIO, urlparse"). The problem is that I can't
# figure out an easy way to make a pattern recognize the keys of
# MAPPING randomly sprinkled in an import statement.
results = self.match(node)
if results:
self.transform(node, results)
else:
# Replace usage of the module.
bare_name = results["bare_with_attr"][0]
new_name = self.replace.get(bare_name.value)
if new_name:
bare_name.replace(Name(new_name, prefix=bare_name.prefix))
| gpl-3.0 |
pledra/odoo-product-configurator | website_product_configurator_mrp/controllers/main.py | 1 | 1200 | from openerp.http import request
from openerp.addons.website_product_configurator.controllers.main import (
WebsiteProductConfig
)
class WebsiteProductConfigMrp(WebsiteProductConfig):
def cart_update(self, product, post):
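        # For 'kit' assemblies, add each component product (linked to the
        # selected attribute values) to the cart individually; otherwise add
        # the configured product itself.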
if post.get('assembly') == 'kit':
attr_products = product.attribute_value_ids.mapped('product_id')
for product in attr_products:
request.website.sale_get_order(force_create=1)._cart_update(
product_id=int(product.id),
add_qty=float(post.get('add_qty')),
)
else:
request.website.sale_get_order(force_create=1)._cart_update(
product_id=int(product.id),
add_qty=float(post.get('add_qty')),
)
return request.redirect("/shop/cart")
def config_vars(self, product_tmpl, active_step=None, data=None):
res = super(WebsiteProductConfigMrp, self).config_vars(
product_tmpl=product_tmpl, active_step=active_step, data=data)
active_step = res.get('active_step')
if active_step and active_step.product_tmpl_id != product_tmpl:
pass
return res
| agpl-3.0 |