repo_name | path | copies | size | content | license
---|---|---|---|---|---
stringlengths 6–100 | stringlengths 4–294 | stringlengths 1–5 | stringlengths 4–6 | stringlengths 606–896k | stringclasses 15 values
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/plat-mac/lib-scriptpackages/CodeWarrior/Required.py | 81 | 1664 | """Suite Required: Terms that every application should support
Level 1, version 1
Generated from /Volumes/Sap/Applications (Mac OS 9)/Metrowerks CodeWarrior 7.0/Metrowerks CodeWarrior/CodeWarrior IDE 4.2.5
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'reqd'
from StdSuites.Required_Suite import *
class Required_Events(Required_Suite_Events):
_argmap_open = {
'converting' : 'Conv',
}
def open(self, _object, _attributes={}, **_arguments):
"""open: Open the specified object(s)
Required argument: list of objects to open
Keyword argument converting: Whether to convert project to latest version (yes/no; default is ask).
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'aevt'
_subcode = 'odoc'
aetools.keysubst(_arguments, self._argmap_open)
_arguments['----'] = _object
aetools.enumsubst(_arguments, 'Conv', _Enum_Conv)
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_Enum_Conv = {
'yes' : 'yes ', # Convert the project if necessary on open
'no' : 'no ', # Do not convert the project if needed on open
}
#
# Indices of types declared in this module
#
_classdeclarations = {
}
_propdeclarations = {
}
_compdeclarations = {
}
_enumdeclarations = {
'Conv' : _Enum_Conv,
}
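# A hedged usage sketch, not part of the generated suite: these event
# classes are normally mixed into an aetools.TalkTo subclass (the
# CodeWarrior class in this package's __init__). The 'CWIE' creator code
# and the project_aliases list below are assumptions for illustration
# (classic Mac OS, Python 2).
#
#   >>> import CodeWarrior
#   >>> ide = CodeWarrior.CodeWarrior('CWIE')
#   >>> ide.open(project_aliases, converting='yes')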
| mit |
mixedpuppy/offlineimap | setup.py | 16 | 1493 | #!/usr/bin/env python
# $Id: setup.py,v 1.1 2002/06/21 18:10:49 jgoerzen Exp $
# IMAP synchronization
# Module: installer
# COPYRIGHT #
# Copyright (C) 2002 - 2006 John Goerzen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# END OF COPYRIGHT #
from distutils.core import setup
import offlineimap
setup(name = "offlineimap",
version = offlineimap.__version__,
description = offlineimap.__description__,
author = offlineimap.__author__,
author_email = offlineimap.__author_email__,
url = offlineimap.__homepage__,
packages = ['offlineimap', 'offlineimap.folder',
'offlineimap.repository', 'offlineimap.ui'],
scripts = ['bin/offlineimap'],
license = offlineimap.__copyright__ + \
", Licensed under the GPL version 2"
)
| gpl-2.0 |
izapolsk/integration_tests | cfme/tests/infrastructure/test_host_drift_analysis.py | 1 | 4959 | import pytest
from cfme import test_requirements
from cfme.common.host_views import HostDriftAnalysis
from cfme.infrastructure.host import Host
from cfme.infrastructure.provider import InfraProvider
from cfme.utils import testgen
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.wait import wait_for
pytestmark = [
test_requirements.drift,
pytest.mark.tier(3),
pytest.mark.meta(blockers=[BZ(1635126, forced_streams=['5.10'])]),
]
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc, [InfraProvider], required_fields=['hosts'])
argnames += ['host']
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(list(zip(argnames, argvalue_tuple)))
for test_host in args['provider'].data['hosts']:
            # only parametrize hosts explicitly flagged for SmartState fleecing
            if not test_host.get('test_fleece', False):
continue
argvs = argvalues[i][:]
new_argvalues.append(argvs + [test_host])
test_id = '{}-{}'.format(args['provider'].key, test_host['type'])
new_idlist.append(test_id)
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope='module')
def a_host(host, appliance, provider):
host_collection = appliance.collections.hosts
return host_collection.instantiate(name=host.name, provider=provider)
@pytest.fixture(scope='module')
def set_host_credentials(provider, a_host, setup_provider_modscope):
try:
host_data, = [data for data in provider.data['hosts'] if data['name'] == a_host.name]
except ValueError:
pytest.skip('Multiple hosts with the same name found, only expecting one')
a_host.update_credentials_rest(credentials=host_data['credentials'])
yield
a_host.update_credentials_rest(
credentials={'default': Host.Credential(principal='', secret='')})
@pytest.mark.rhv3
def test_host_drift_analysis(appliance, request, a_host, soft_assert, set_host_credentials):
"""Tests host drift analysis
Metadata:
test_flag: host_drift_analysis
Polarion:
assignee: sbulage
casecomponent: SmartState
initialEstimate: 1/3h
"""
# get drift history num
view = navigate_to(a_host, 'Details')
drift_num_orig = int(view.entities.summary('Relationships').get_text_of('Drift History'))
# clear table
col = appliance.collections.tasks.filter({'tab': 'AllTasks'})
col.delete_all()
# initiate 1st analysis
a_host.run_smartstate_analysis(wait_for_task_result=True)
    # wait for drift history num+1
navigate_to(a_host, 'Details')
wait_for(
lambda: (view.entities.summary('Relationships').get_text_of('Drift History') ==
str(drift_num_orig + 1)),
delay=10,
num_sec=360,
message="Waiting for Drift History count to increase",
fail_func=appliance.server.browser.refresh
)
# add a tag and a finalizer to remove it
added_tag = appliance.collections.categories.instantiate(
display_name='Department').collections.tags.instantiate(
display_name='Accounting')
a_host.add_tag(added_tag)
request.addfinalizer(lambda: a_host.remove_tag(added_tag))
# initiate 2nd analysis
a_host.run_smartstate_analysis(wait_for_task_result=True)
    # wait for drift history num+2
navigate_to(a_host, 'Details')
wait_for(
lambda: (view.entities.summary('Relationships').get_text_of('Drift History') ==
str(drift_num_orig + 2)),
delay=10,
num_sec=360,
message="Waiting for Drift History count to increase",
fail_func=appliance.server.browser.refresh
)
# check drift difference
soft_assert(
a_host.equal_drift_results(
'{} (1)'.format(added_tag.category.display_name),
'My Company Tags',
0,
1
),
"Drift analysis results are equal when they shouldn't be"
)
# Test UI features that modify the drift grid
drift_analysis_view = appliance.browser.create_view(HostDriftAnalysis)
# Accounting tag should not be displayed, because it was changed to True
drift_analysis_view.toolbar.same_values_attributes.click()
soft_assert(
not drift_analysis_view.drift_analysis.check_section_attribute_availability(
'{}'.format(added_tag.category.display_name)),
"{} row should be hidden, but not".format(added_tag.display_name))
# Accounting tag should be displayed now
drift_analysis_view.toolbar.different_values_attributes.click()
soft_assert(
drift_analysis_view.drift_analysis.check_section_attribute_availability(
'{} (1)'.format(added_tag.category.display_name)),
"{} row should be visible, but not".format(added_tag.display_name))
| gpl-2.0 |
Semanticle/Semanticle | sm-mt-devel/src/metabulate/tests/test26case-004d.py | 1 | 18359 | '''
Copyright 2009, 2010 Anthony John Machin. All rights reserved.
Supplied subject to The GNU General Public License v3.0
Created on 28 Jan 2009
Last Updated on 10 July 2010
As test20 with tests of:
rules instantiation and query inference
Related:
single dict TS recursion rule plus generic rule + minimal data:
test20simple-001d - unmerged recursive rule EQ order correct QL order correct
test20simple-002d - unmerged recursive rule EQ order correct QL order incorrect
test20simple-003d - merged recursive rule EQ order correct QL order correct variables consistent
test20simple-004d - merged recursive rule EQ order correct QL order correct variables inconsistent (03)
test20simple-005d - merged recursive rule EQ order correct QL order incorrect variables consistent
test20simple-006d - merged recursive rule EQ order correct QL order incorrect variables inconsistent (04)
test20simple-007d - unmerged recursive rule EQ order incorrect QL order correct (05)
test20simple-008d - unmerged recursive rule EQ order incorrect QL order incorrect (06)
test20simple-009d - merged recursive rule EQ order incorrect QL order correct variables consistent
test20simple-010d - merged recursive rule EQ order incorrect QL order correct variables inconsistent (07)
test20simple-011d - merged recursive rule EQ order incorrect QL order incorrect variables consistent
test20simple-012d - merged recursive rule EQ order incorrect QL order incorrect variables inconsistent (08)
single rbtree TS recursion rule plus generic rule + minimal data:
test20simple-001r - unmerged recursive rule EQ order correct QL order correct
test20simple-002r - unmerged recursive rule EQ order correct QL order incorrect
test20simple-003r - merged recursive rule EQ order correct QL order correct variables consistent
test20simple-004r - merged recursive rule EQ order correct QL order correct variables inconsistent (03)
test20simple-005r - merged recursive rule EQ order correct QL order incorrect variables consistent
test20simple-006r - merged recursive rule EQ order correct QL order incorrect variables inconsistent (04)
test20simple-007r - unmerged recursive rule EQ order incorrect QL order correct (05)
test20simple-008r - unmerged recursive rule EQ order incorrect QL order incorrect (06)
test20simple-009r - merged recursive rule EQ order incorrect QL order correct variables consistent
test20simple-010r - merged recursive rule EQ order incorrect QL order correct variables inconsistent (07)
test20simple-011r - merged recursive rule EQ order incorrect QL order incorrect variables consistent
test20simple-012r - merged recursive rule EQ order incorrect QL order incorrect variables inconsistent (08)
@author: Administrator
'''
import metabulate.stores.stores as mtstores
import metabulate.facades.facade as mtfacade
import metabulate.utils.utils as mtutils
import metabulate.utils.debug as mtdebug
import metabulate.renderers.render as mtrender
import metabulate.rules.rules as mtrules
import metabulate.singletons.singleton as mtsingleton
if __name__ == "__main__":
# get default file paths and types
mtconfig = mtsingleton.Settings()._getItem('config')
debug_path = mtconfig._getItem('debugfile_path','%configfilesbase%Debug\\',mtconfig)
debug_type = mtconfig._getItem('debugfile_type','txt',mtconfig)
result_path = mtconfig._getItem('resultsfile_path','%configfilesbase%Results\\',mtconfig)
result_type = mtconfig._getItem('resultsfile_type','txt',mtconfig)
unload_path = mtconfig._getItem('stores_unloadfile_path','%configfilesbase%Unloads\\',mtconfig)
unload_type = mtconfig._getItem('stores_unloadfile_type','pyo',mtconfig)
# set debug criteria
dc22f = mtdebug.Criteria(methods=['_actionPredicate','_actionTriple','_processTriple','_addTriple'],
targets=[mtutils.Flatfile(path=debug_path,
name='DebugOutput_dc22',
type=debug_type)])
dc28 = mtdebug.Criteria(classes=['Query'],methods=['_solve'],notes=['trace'])
# set debug
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc8f,dc12f,dc7f,dc13f,dc10f,dc14f,dc15f])
# d._update(criteria=[dc6,dc20f_dup,dc20f_ok])
# d._update(criteria=[dc11f])
# d._update(criteria=[dc21f])
# d._update(criteria=[dc6,dc20f])
# files
fu = mtutils.Flatfile(path=unload_path,
name='test20r-30_unload_s1',
type=unload_type)
f1 = mtutils.Flatfile(path=result_path,
name='genealogy_test1',
type=result_type)
f3 = mtutils.Flatfile(path=result_path,
name='test20r-30_triples',
type=result_type)
f4 = mtutils.Flatfile(path=result_path,
name='test20r-30_rules',
type=result_type)
f5 = mtutils.Flatfile(path=result_path,
name='test20r-30_queries',
type=result_type)
f6 = mtutils.Flatfile(path=result_path,
name='test20r-30_results',
type=result_type)
# stores
sa = mtstores.TripleStore(structure='dict') # TS sa dict
sr = mtstores.TripleStore() # TS sr
s2 = mtstores.TripleStore()
s3 = mtstores.TripleStore()
s4 = mtstores.TripleStore()
# add namespaces in source stores
sa._addNamespace('mytriples', 'http://www.semanticle.org/triples/')
sa._addNamespace('comtriples', 'http://www.semanticle.com/triples/')
# triples for recursion test
sa._actionTriple("add [('mytriples#bob', 'child_of', 'alice'),('http://www.semanticle.com/triples/#dan', 'child_of', 'cev')]")
sa._actionTriple("add", [('cev', 'child_of', 'http://www.semanticle.org/triples/#bob'),"('http://www.semanticle.com/triples/#dan', 'child_of', 'cev')"])
sa._actionTriple("add", 'eve', 'child_of', 'comtriples#dan')
# sa._actionTriple("add",{('?desc', 'desc_of', '?ancs'):
# [
# [[('?desc', 'child_of', '?ancs')]]
# ,[[('?child', 'child_of', '?ancs')],[('?desc', 'desc_of', '?child')]]
# ,[[('?desc', 'desc_of', '?child')],[('?child', 'child_of', '?ancs')]]
# ]})
sa._actionTriple("add ('?desc', 'desc_of', '?ancs') :- [[[('?desc', 'child_of', '?ancs')]]]") # add rule clause 1 to DTS._queryStore b (or change to DTS s1)
sa._actionTriple("add",{('?obj', '?inv', '?sub'):
[
[[('?inv', 'rev_of', '?forw'),('?forw', 'rev_of', '?inv')]
,[('?sub', "?forw", '?obj')]]
,[[('?inv', 'syn_of', '?inv1'),('?inv1', 'syn_of', '?inv')]
,[('?obj', "?inv1", '?sub')]]
]}) # add rule to DTS._queryStore a (or change to DTS s1)
sa._actionTriple("add","{('?desc1', 'desc_of', '?ancs'):[[[('?child', 'child_of', '?ancs')],[('?desc1', 'desc_of', '?child')]]]}") # add rule clause 2 to DTS._queryStore b (or change to DTS s1)
# sa._actionTriple("add","{('?desc1', 'desc_of', '?ancs'):[[[('?desc1', 'desc_of', '?child')],[('?child', 'child_of', '?ancs')]]]}") # add rule clause 2 to DTS._queryStore b (or change to DTS s1)
sa._actionTriple("add", 'ancs_of', 'rev_of', 'desc_of') # ant
# s1._actionTriple("add", 'desc_of', 'rev_of', 'ancsr_of') # rev ant
sa._actionTriple("add", 'des_of', 'syn_of', 'desc_of') # syn
# s1._actionTriple("add", 'desc_of', 'syn_of', 'descr_of') # rev syn
sa._actionTriple("add", 'anc_of', 'rev_of', 'des_of') # ant of syn
# s1._actionTriple("add", 'ancestor1_of', 'syn_of', 'ancs_of') # syn of ant
sa._actionTriple("add", 'ancestor2_of', 'syn_of', 'anc_of') # syn of ant of syn
# s1._actionTriple("add", 'ancestor3_of', 'syn_of', 'ancestor2_of') # syn of syn of ant of syn
# triples for nested rules test
# s1._actionTriple("add", 'bob', 'is_sex', 'male')
# s1._actionTriple("add", 'cev', 'is_sex', 'male')
# s1._actionTriple("add", 'dan', 'is_sex', 'male')
# s1._actionTriple("add", 'eve', 'is_sex', 'female')
# s1._actionTriple("add", 'nancy', 'mother_of', 'mike')
# s1._actionTriple("add", 'niel', 'father_of', 'mike')
# s1._actionTriple("add", 'mike', 'is_sex', 'male')
# s1._actionPredicate(action="add",
# fact=('?child', 'son_of', '?parent'),
# rule=[[[('?child', 'child_of', '?parent')],
# [('?child', "'is_sex'", "'male'")]]])
# s1._actionPredicate(action="add",
# fact=('?child', 'child_of', '?parent'),
# rule=[[[('?parent', 'father_of', '?child')]],
# [[('?parent', "'mother_of'", '?child')]]])
# Test Load/Unload
# s1._unload(file=f1)
# s0 = s1._load(file=f1)
# print (s0._toString())
# print
print (sa._toString())
# print
# print ('unloading DSTS s1 to fu')
# sa._unload()
# print ('reloading DSTS from fu as sr')
# sr = sr._load()
# print
# print (sr._toString())
# print
# print (s0._toString())
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc19f])
# set Result requests
# rlt04 = mtrules.Result(request=[[('?sub=eve','?pred=child_of','?obj=dan')]]) # pass
# rlt04 = mtrules.Result(request=[[("?sub='*'","?pred='*'","?obj='*'")]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub="*"','?pred="*"','?obj="*"')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub="?"','?pred','?obj="?"')]]) # FAIL - NO RULES RETURNED (MAYBE OK?)
# rlt04 = mtrules.Result(request=[[("?sub='?'","?pred","?obj='?'")]]) # FAIL - NO RULES RETURNED (MAYBE OK?)
# rlt04 = mtrules.Result(request=[[('?sub=eve', "?pred=desc_of", '?obj=alice')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub=eve', "?pred=des_of", '?obj=alice')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub', "?pred=desc_of", '?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub', "?pred=ancs_of", '?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub', "?pred=des_of", '?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub=?','?pred','?obj')
# ,('?sub','?pred=?','?obj')
# ,('?sub','?pred','?obj=?')]]) # pass - all inferences
# rlt04 = mtrules.Result(request=[[('?sub == ?','?pred','?obj')
# ,('?sub','?pred = =?','?obj')
# ,('?sub','?pred','?obj==?')]]) # pass - all rules
rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')],[('?sub','child_of','dan')]]) # FAIL
# rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')],['not',('?sub','child_of','dan')]]) # pass
# rlt04 = mtrules.Result(request=[['not',('?sub','child_of','comtriples#dan')],[('?sub','?pred','?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')],['not',('?sub','child_of','dan')]
# ,['not',('?sub','from','London')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub','?pred=ancestor3_of','?obj')]]) # pass
rlt05 = mtrules.Result(request=[[("?s","?r=?r1='child_of'","?o")]]) # pass
# rlt02 = mtrules.Result(request=[[('eve', "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[(('ender', 'from', 'Truro'), "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[(('ender|eddy', 'from', 'Truro'), "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[(('?person1', 'from', 'Truro'), "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[('eve', "desc_of", '?person2')]
# ,[('?person2', "desc_of", 'alice')]]) # pass
# rlt02 = mtrules.Result(request=[[('eve', "des_of", '?person2')]
# ,[('?person2', "des_of", 'alice')]]) # pass - syn of recursed rule
# rlt02 = mtrules.Result(request=[[('eve', "descr_of", '?person2')]
# ,[('?person2', "descr_of", 'alice')]]) # pass - reversed syn of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancs_of", '?person2')]
# ,[('?person2', "ancs_of", 'eve')]]) # pass - ant of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancsr_of", '?person2')]
# ,[('?person2', "ancsr_of", 'eve')]]) # pass - reversed ant of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "anc_of", '?person2')]
# ,[('?person2', "anc_of", 'eve')]]) # pass - ant of syn of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancestor1_of", '?person2')]
# ,[('?person2', "ancestor1_of", 'eve')]]) # pass - syn of ant of recursed rule
rlt02 = mtrules.Result(request=[[('alice', "ancestor2_of", '?person2')]
,[('?person2', "ancestor2_of", 'eve')]]) # pass - syn of ant of syn of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancestor3_of", '?person2')]
# ,[('?person2', "ancestor3_of", 'eve')]]) # pass - syn of syn of ant of syn of recursed rule
print ('queries defined')
# rendering submission
p0t = mtrender.Sequence(pattern=['?!triples'], # via variable notation
targets=[s3,f3],
render='py')
p0r = mtrender.Sequence(pattern=['?!rules'], # via variable notation
targets=[s4,f4],
render='py')
p0q = mtrender.Sequence(pattern=['?!queries'], # via variable notation
targets=[f5],
render='py')
    p1 = mtrender.Sequence(pattern=[('?son', 'son_of', '?person')], # triple propagation
targets=[s2,'display'],
render='csv')
    p2 = mtrender.Sequence(pattern=[('?person1', 'desc_of', '?person2')], # triple propagation
targets=[s2,'display'],
render='csv')
p3 = mtrender.Sequence(pattern=['?person2'],
targets=['display'],
render='csv')
p4 = mtrender.Sequence(pattern=[('?sub', '?pred', '?obj')],
targets=[s2,'display'],
render='csv',
URImode='nativealias')
p4a = mtrender.Sequence(pattern=[('?sub', '?pred', '?obj'),('results', 'contain', ('?sub', '?pred', '?obj'))],
targets=[s2,'display'],
render='csv',
URImode='nativealias')
p6 = mtrender.Transformation(pattern=['!!og!!','/^(.)(.*?)(.)$/$3$2$1/'],id='?p6')
#p5 = mtrender.Sequence(pattern=[({('np2',p2):{'og':'?o'}},'is known by','?s')])
p5 = mtrender.Sequence(pattern=[({('np6','!p6'):{'?og':'?o'}},'is known by','?s')],
targets=['display'],
render='csv')
print ('Renders defined')
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc16f])
# set query
rlt02._update(outputs=[p3])
face02 = mtfacade.Facade(store=sa,
results=[rlt02])
# rlt04._update(outputs=[p4,p0t,p0r,p0q])
rlt04._update(outputs=[p4])
face04 = mtfacade.Facade(store=sa,
results=[rlt04])
rlt05._update(outputs=[p5])
face05 = mtfacade.Facade(store=sa,
results=[rlt05])
print ('results and facades defined')
# reset dubug criteria
# execute the query
# s1._update(infer=False)
face04._generate()
print
# check output channelled to a store
print ('results instantiated')
print (s2._toString())
print ('should be 60 results')
print
print ('contributory triples instantiated')
print (s3._toString())
print ('contributory rules instantiated')
print (s4._toString())
# print ('source Store again')
# print (sr._toString())
| gpl-2.0 |
40223119/2015cda | static/Brython3.1.1-20150328-091302/Lib/codecs.py | 739 | 35436 | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import builtins, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % \
(self.__class__.__module__, self.__class__.__name__,
self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private codepoints U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
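# A hedged illustration of the error-handling schemes described above,
# using str.encode rather than this abstract base class directly; the
# 'ascii' codec is chosen only for demonstration:
#
#   >>> 'abc\xff'.encode('ascii', 'replace')
#   b'abc?'
#   >>> 'abc\xff'.encode('ascii', 'xmlcharrefreplace')
#   b'abc&#255;'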
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
        Create an IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decode input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars < 0:
if size < 0:
if self.charbuffer:
break
elif len(self.charbuffer) >= size:
break
else:
if len(self.charbuffer) >= chars:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
            way to find the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
In the other direction, data is read from the stream using a
        Reader instance and then returned to the caller as encoded data.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
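# A hedged usage sketch; the file name is illustrative. Note the mode is
# forced to binary internally, as described in the docstring above:
#
#   >>> f = open('example.txt', 'w', encoding='utf-8')
#   >>> f.write('\u20ac')
#   >>> f.close()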
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
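# A hedged usage sketch of transparent recoding (see also the __main__
# block at the end of this module, which wraps sys.stdout the same way);
# the in-memory byte stream and the encodings chosen are illustrative:
#
#   >>> import io
#   >>> raw = io.BytesIO()
#   >>> wrapped = EncodedFile(raw, 'utf-8', 'latin-1')
#   >>> wrapped.write(b'caf\xc3\xa9')   # UTF-8 in ...
#   >>> raw.getvalue()                  # ... Latin-1 out
#   b'caf\xe9'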
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
    Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
    Decodes the input strings from the iterator using an IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
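# A hedged round-trip sketch for the two iterators above; 'utf-8' is an
# illustrative codec name:
#
#   >>> list(iterencode(['sp', 'am'], 'utf-8'))
#   [b'sp', b'am']
#   >>> ''.join(iterdecode([b'sp', b'am'], 'utf-8'))
#   'spam'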
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
        multiple characters to \u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
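# A hedged sketch of the duplicate-target rule described above: both 0x41
# and 0x61 decode to 'A', so 'A' becomes an undefined (None) mapping:
#
#   >>> make_encoding_map({0x41: 'A', 0x61: 'A', 0x42: 'B'})
#   {'A': None, 'B': 66}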
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
    # In --disable-unicode builds, these error handlers are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
| gpl-3.0 |
boada/vpCluster | data/boada/may_2012/analysis/c260p61+32p13/stack_dithers.py | 6 | 2253 | import pyfits as pyf
from glob import glob
import os
from astLib.astStats import clippedMedianStdev
from numpy import average, delete
import numpy as np
def skySubtract(data):
    # Treat fibers whose summed flux falls at or below the clipped median
    # of all fiber sums as sky fibers, and average them per wavelength bin.
sums = [sum(data[i, :]) for i in range(data.shape[0])]
med = clippedMedianStdev(sums)
med = med['clippedMedian']
skyfibers = [i for i in range(data.shape[0]) if sum(data[i, :]) <= med]
skydata = data.take(skyfibers, axis=0)
skyflux = [average(skydata[:, i]) for i in range(skydata.shape[1])]
return skyflux
files = glob('bcs*.fits')
for f in files:
oimg = pyf.open(f)
obj = oimg[1].header['object'].split('_')
print f, obj
field, dither, num = obj[1].split()
# Correct for a typo in the naming.
if obj[0] == 'c205p08+46p7':
obj[0] = 'c250p08+46p7'
# load data and skysubtract
data = oimg[1].data
dataHDR = oimg[1].header
sky = skySubtract(data)
if not os.path.isfile(obj[0] + '_' + field + '_' + num + '.fits'):
# rewriting the whole file because that is easy to update
oimg.writeto(obj[0] + '_' + field + '_' + num + '.fits')
# update with sky subtraction
pyf.update(obj[0] + '_' + field + '_' + num + '.fits', data, dataHDR,
1)
else:
# Here's the data we are going to add to
img = pyf.open(obj[0] + '_' + field + '_' + num + '.fits')
data1 = img[1].data
dataHDR1 = img[1].header
try:
pyf.update(obj[0] + '_' + field + '_' + num + '.fits',
data1 + data, dataHDR, 1)
except ValueError:
print 'Different lengths'
# Make sure all of the arrays are the same length
if data.shape[1] > data1.shape[1]:
sky.pop(-1 * (data.shape[1] - data1.shape[1]))
data = delete(data, -1 * (data.shape[1] - data1.shape[1]), 1)
elif data.shape[1] < data1.shape[1]:
data1 = delete(data1, -1 * (data1.shape[1] - data.shape[1]), 1)
else:
print "I don't know what to do!"
# UPDATE!!!
pyf.update(obj[0] + '_' + field + '_' + num + '.fits',
data1 + data, dataHDR, 1)
img.close()
oimg.close()
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/win32com/makegw/makegw.py | 18 | 16995 | """Utility functions for writing out gateway C++ files
This module will generate a C++/Python binding for a specific COM
interface.
At this stage, no command line interface exists. You must start Python,
import this module, change to the directory where the generated code should
be written, and run the public function.
This module is capable of generating both 'Interfaces' (ie, Python
client side support for the interface) and 'Gateways' (ie, Python
server side support for the interface). Many COM interfaces are useful
both as Client and Server. Other interfaces, however, really only make
sense to implement one side or the other. For example, it would be pointless
for Python to implement Server side for 'IRunningObjectTable', unless we were
implementing core COM for an operating system in Python (hey - now there's an idea!)
Most COM interface code is totally boiler-plate - it consists of
converting arguments, dispatching the call to Python, and processing
any result values.
This module automates the generation of such code. It has the ability to
parse a .H file generated by the MIDL tool (ie, almost all COM .h files)
and build almost totally complete C++ code.
The module understands some of the well known data types, and how to
convert them. There are only a couple of places where hand-editing is
necessary, as detailed below:
unsupported types -- If a type is not known, the generator will
pretty much ignore it, but write a comment to the generated code. You
may want to add custom support for this type. In some cases, C++ compile errors
will result. These are intentional - generating code to remove these errors would
imply a false sense of security that the generator has done the right thing.
other return policies -- By default, Python never sees the return SCODE from
a COM function. The interface usually returns None if OK, else a COM exception
if "FAILED(scode)" is TRUE. You may need to change this if:
* EXCEPINFO is passed to the COM function. This is not detected and handled
* For some reason Python should always see the result SCODE, even if it
did fail or succeed. For example, some functions return a BOOLEAN result
in the SCODE, meaning Python should always see it.
* FAILED(scode) for the interface still has valid data to return (by default,
    the code generated does not process the return values, and raises an exception
    to Python/COM)
"""
import re
import makegwparse
def make_framework_support(header_file_name, interface_name, bMakeInterface = 1, bMakeGateway = 1):
"""Generate C++ code for a Python Interface and Gateway
header_file_name -- The full path to the .H file which defines the interface.
interface_name -- The name of the interface to search for, and to generate.
bMakeInterface = 1 -- Should interface (ie, client) support be generated.
  bMakeGateway = 1 -- Should gateway (ie, server) support be generated.
This method will write a .cpp and .h file into the current directory,
(using the name of the interface to build the file name.
"""
fin=open(header_file_name)
try:
interface = makegwparse.parse_interface_info(interface_name, fin)
finally:
fin.close()
if bMakeInterface and bMakeGateway:
desc = "Interface and Gateway"
elif bMakeInterface and not bMakeGateway:
desc = "Interface"
else:
desc = "Gateway"
if interface.name[:5]=="IEnum": # IEnum - use my really simple template-based one
import win32com.makegw.makegwenum
ifc_cpp_writer = win32com.makegw.makegwenum._write_enumifc_cpp
gw_cpp_writer = win32com.makegw.makegwenum._write_enumgw_cpp
else: # Use my harder working ones.
ifc_cpp_writer = _write_ifc_cpp
gw_cpp_writer = _write_gw_cpp
fout=open("Py%s.cpp" % interface.name, "w")
try:
fout.write(\
'''\
// This file implements the %s %s for Python.
// Generated by makegw.py
#include "shell_pch.h"
''' % (interface.name, desc))
# if bMakeGateway:
# fout.write('#include "PythonCOMServer.h"\n')
# if interface.base not in ["IUnknown", "IDispatch"]:
# fout.write('#include "Py%s.h"\n' % interface.base)
fout.write('#include "Py%s.h"\n\n// @doc - This file contains autoduck documentation\n' % interface.name)
if bMakeInterface: ifc_cpp_writer(fout, interface)
if bMakeGateway: gw_cpp_writer(fout, interface)
finally:
fout.close()
fout=open("Py%s.h" % interface.name, "w")
try:
fout.write(\
'''\
// This file declares the %s %s for Python.
// Generated by makegw.py
''' % (interface.name, desc))
if bMakeInterface: _write_ifc_h(fout, interface)
if bMakeGateway: _write_gw_h(fout, interface)
finally:
fout.close()
###########################################################################
#
# INTERNAL FUNCTIONS
#
#
def _write_ifc_h(f, interface):
f.write(\
'''\
// ---------------------------------------------------
//
// Interface Declaration
class Py%s : public Py%s
{
public:
MAKE_PYCOM_CTOR(Py%s);
static %s *GetI(PyObject *self);
static PyComTypeObject type;
// The Python methods
''' % (interface.name, interface.base, interface.name, interface.name))
for method in interface.methods:
f.write('\tstatic PyObject *%s(PyObject *self, PyObject *args);\n' % method.name)
f.write(\
'''\
protected:
Py%s(IUnknown *pdisp);
~Py%s();
};
''' % (interface.name, interface.name))
def _write_ifc_cpp(f, interface):
name = interface.name
f.write(\
'''\
// ---------------------------------------------------
//
// Interface Implementation
Py%(name)s::Py%(name)s(IUnknown *pdisp):
Py%(base)s(pdisp)
{
ob_type = &type;
}
Py%(name)s::~Py%(name)s()
{
}
/* static */ %(name)s *Py%(name)s::GetI(PyObject *self)
{
return (%(name)s *)Py%(base)s::GetI(self);
}
''' % (interface.__dict__))
ptr = re.sub('[a-z]', '', interface.name)
strdict = {'interfacename':interface.name, 'ptr': ptr}
for method in interface.methods:
strdict['method'] = method.name
f.write(\
'''\
// @pymethod |Py%(interfacename)s|%(method)s|Description of %(method)s.
PyObject *Py%(interfacename)s::%(method)s(PyObject *self, PyObject *args)
{
%(interfacename)s *p%(ptr)s = GetI(self);
if ( p%(ptr)s == NULL )
return NULL;
''' % strdict)
argsParseTuple = argsCOM = formatChars = codePost = \
codePobjects = codeCobjects = cleanup = cleanup_gil = ""
needConversion = 0
# if method.name=="Stat": import win32dbg;win32dbg.brk()
for arg in method.args:
try:
argCvt = makegwparse.make_arg_converter(arg)
if arg.HasAttribute("in"):
val = argCvt.GetFormatChar()
if val:
f.write ('\t' + argCvt.GetAutoduckString() + "\n")
formatChars = formatChars + val
argsParseTuple = argsParseTuple + ", " + argCvt.GetParseTupleArg()
codePobjects = codePobjects + argCvt.DeclareParseArgTupleInputConverter()
codePost = codePost + argCvt.GetParsePostCode()
needConversion = needConversion or argCvt.NeedUSES_CONVERSION()
cleanup = cleanup + argCvt.GetInterfaceArgCleanup()
cleanup_gil = cleanup_gil + argCvt.GetInterfaceArgCleanupGIL()
comArgName, comArgDeclString = argCvt.GetInterfaceCppObjectInfo()
if comArgDeclString: # If we should declare a variable
codeCobjects = codeCobjects + "\t%s;\n" % (comArgDeclString)
argsCOM = argsCOM + ", " + comArgName
except makegwparse.error_not_supported, why:
f.write('// *** The input argument %s of type "%s" was not processed ***\n// Please check the conversion function is appropriate and exists!\n' % (arg.name, arg.raw_type))
f.write('\t%s %s;\n\tPyObject *ob%s;\n' % (arg.type, arg.name, arg.name))
f.write('\t// @pyparm <o Py%s>|%s||Description for %s\n' % (arg.type, arg.name, arg.name))
codePost = codePost + '\tif (bPythonIsHappy && !PyObject_As%s( ob%s, &%s )) bPythonIsHappy = FALSE;\n' % (arg.type, arg.name, arg.name)
formatChars = formatChars + "O"
argsParseTuple = argsParseTuple + ", &ob%s" % (arg.name)
argsCOM = argsCOM + ", " + arg.name
cleanup = cleanup + "\tPyObject_Free%s(%s);\n" % (arg.type, arg.name)
if needConversion: f.write("\tUSES_CONVERSION;\n")
f.write(codePobjects);
f.write(codeCobjects);
f.write('\tif ( !PyArg_ParseTuple(args, "%s:%s"%s) )\n\t\treturn NULL;\n' % (formatChars, method.name, argsParseTuple))
if codePost:
f.write('\tBOOL bPythonIsHappy = TRUE;\n')
f.write(codePost);
f.write('\tif (!bPythonIsHappy) return NULL;\n')
strdict['argsCOM'] = argsCOM[1:]
strdict['cleanup'] = cleanup
strdict['cleanup_gil'] = cleanup_gil
f.write(\
''' HRESULT hr;
PY_INTERFACE_PRECALL;
hr = p%(ptr)s->%(method)s(%(argsCOM)s );
%(cleanup)s
PY_INTERFACE_POSTCALL;
%(cleanup_gil)s
if ( FAILED(hr) )
return PyCom_BuildPyException(hr, p%(ptr)s, IID_%(interfacename)s );
''' % strdict)
codePre = codePost = formatChars = codeVarsPass = codeDecl = ""
for arg in method.args:
if not arg.HasAttribute("out"):
continue
try:
argCvt = makegwparse.make_arg_converter(arg)
formatChar = argCvt.GetFormatChar()
if formatChar:
formatChars = formatChars + formatChar
codePre = codePre + argCvt.GetBuildForInterfacePreCode()
codePost = codePost + argCvt.GetBuildForInterfacePostCode()
codeVarsPass = codeVarsPass + ", " + argCvt.GetBuildValueArg()
codeDecl = codeDecl + argCvt.DeclareParseArgTupleInputConverter()
except makegwparse.error_not_supported, why:
f.write('// *** The output argument %s of type "%s" was not processed ***\n// %s\n' % (arg.name, arg.raw_type, why))
continue
if formatChars:
f.write('%s\n%s\tPyObject *pyretval = Py_BuildValue("%s"%s);\n%s\treturn pyretval;' % (codeDecl, codePre, formatChars, codeVarsPass, codePost))
else:
f.write('\tPy_INCREF(Py_None);\n\treturn Py_None;\n')
f.write('\n}\n\n')
f.write ('// @object Py%s|Description of the interface\n' % (name))
f.write('static struct PyMethodDef Py%s_methods[] =\n{\n' % name)
for method in interface.methods:
f.write('\t{ "%s", Py%s::%s, 1 }, // @pymeth %s|Description of %s\n' % (method.name, interface.name, method.name, method.name, method.name))
interfacebase = interface.base
f.write('''\
{ NULL }
};
PyComTypeObject Py%(name)s::type("Py%(name)s",
&Py%(interfacebase)s::type,
sizeof(Py%(name)s),
Py%(name)s_methods,
GET_PYCOM_CTOR(Py%(name)s));
''' % locals())
def _write_gw_h(f, interface):
if interface.name[0] == "I":
gname = 'PyG' + interface.name[1:]
else:
gname = 'PyG' + interface.name
name = interface.name
if interface.base == "IUnknown" or interface.base == "IDispatch":
base_name = "PyGatewayBase"
else:
if interface.base[0] == "I":
base_name = 'PyG' + interface.base[1:]
else:
base_name = 'PyG' + interface.base
f.write(\
'''\
// ---------------------------------------------------
//
// Gateway Declaration
class %s : public %s, public %s
{
protected:
%s(PyObject *instance) : %s(instance) { ; }
PYGATEWAY_MAKE_SUPPORT2(%s, %s, IID_%s, %s)
''' % (gname, base_name, name, gname, base_name, gname, name, name, base_name))
if interface.base != "IUnknown":
f.write("\t// %s\n\t// *** Manually add %s method decls here\n\n" % (interface.base, interface.base))
else:
f.write('\n\n')
f.write("\t// %s\n" % name)
for method in interface.methods:
f.write('\tSTDMETHOD(%s)(\n' % method.name)
if method.args:
for arg in method.args[:-1]:
f.write("\t\t%s,\n" % (arg.GetRawDeclaration()))
arg = method.args[-1]
f.write("\t\t%s);\n\n" % (arg.GetRawDeclaration()))
else:
f.write('\t\tvoid);\n\n')
f.write('};\n')
f.close()
def _write_gw_cpp(f, interface):
if interface.name[0] == "I":
gname = 'PyG' + interface.name[1:]
else:
gname = 'PyG' + interface.name
name = interface.name
if interface.base == "IUnknown" or interface.base == "IDispatch":
base_name = "PyGatewayBase"
else:
if interface.base[0] == "I":
base_name = 'PyG' + interface.base[1:]
else:
base_name = 'PyG' + interface.base
f.write('''\
// ---------------------------------------------------
//
// Gateway Implementation
''' % {'name':name, 'gname':gname, 'base_name':base_name})
for method in interface.methods:
f.write(\
'''\
STDMETHODIMP %s::%s(
''' % (gname, method.name))
if method.args:
for arg in method.args[:-1]:
inoutstr = ']['.join(arg.inout)
f.write("\t\t/* [%s] */ %s,\n" % (inoutstr, arg.GetRawDeclaration()))
arg = method.args[-1]
inoutstr = ']['.join(arg.inout)
f.write("\t\t/* [%s] */ %s)\n" % (inoutstr, arg.GetRawDeclaration()))
else:
f.write('\t\tvoid)\n')
f.write("{\n\tPY_GATEWAY_METHOD;\n")
cout = 0
codePre = codePost = codeVars = ""
argStr = ""
needConversion = 0
formatChars = ""
if method.args:
for arg in method.args:
if arg.HasAttribute("out"):
cout = cout + 1
if arg.indirectionLevel ==2 :
f.write("\tif (%s==NULL) return E_POINTER;\n" % arg.name)
if arg.HasAttribute("in"):
try:
argCvt = makegwparse.make_arg_converter(arg)
argCvt.SetGatewayMode()
formatchar = argCvt.GetFormatChar();
needConversion = needConversion or argCvt.NeedUSES_CONVERSION()
if formatchar:
formatChars = formatChars + formatchar
codeVars = codeVars + argCvt.DeclareParseArgTupleInputConverter()
argStr = argStr + ", " + argCvt.GetBuildValueArg()
codePre = codePre + argCvt.GetBuildForGatewayPreCode()
codePost = codePost + argCvt.GetBuildForGatewayPostCode()
except makegwparse.error_not_supported, why:
f.write('// *** The input argument %s of type "%s" was not processed ***\n// - Please ensure this conversion function exists, and is appropriate\n// - %s\n' % (arg.name, arg.raw_type, why))
f.write('\tPyObject *ob%s = PyObject_From%s(%s);\n' % (arg.name, arg.type, arg.name))
f.write('\tif (ob%s==NULL) return MAKE_PYCOM_GATEWAY_FAILURE_CODE("%s");\n' % (arg.name, method.name))
codePost = codePost + "\tPy_DECREF(ob%s);\n" % arg.name
formatChars = formatChars + "O"
argStr = argStr + ", ob%s" % (arg.name)
if needConversion: f.write('\tUSES_CONVERSION;\n')
f.write(codeVars)
f.write(codePre)
if cout:
f.write("\tPyObject *result;\n")
resStr = "&result"
else:
resStr = "NULL"
if formatChars:
fullArgStr = '%s, "%s"%s' % (resStr, formatChars, argStr)
else:
fullArgStr = resStr
f.write('\tHRESULT hr=InvokeViaPolicy("%s", %s);\n' % (method.name, fullArgStr))
f.write(codePost)
if cout:
f.write("\tif (FAILED(hr)) return hr;\n")
f.write("\t// Process the Python results, and convert back to the real params\n")
# process the output arguments.
formatChars = codePobjects = codePost = argsParseTuple = ""
needConversion = 0
for arg in method.args:
if not arg.HasAttribute("out"):
continue
try:
argCvt = makegwparse.make_arg_converter(arg)
argCvt.SetGatewayMode()
val = argCvt.GetFormatChar()
if val:
formatChars = formatChars + val
argsParseTuple = argsParseTuple + ", " + argCvt.GetParseTupleArg()
codePobjects = codePobjects + argCvt.DeclareParseArgTupleInputConverter()
codePost = codePost + argCvt.GetParsePostCode()
needConversion = needConversion or argCvt.NeedUSES_CONVERSION()
except makegwparse.error_not_supported, why:
f.write('// *** The output argument %s of type "%s" was not processed ***\n// %s\n' % (arg.name, arg.raw_type, why))
if formatChars: # If I have any to actually process.
if len(formatChars)==1:
parseFn = "PyArg_Parse"
else:
parseFn = "PyArg_ParseTuple"
if codePobjects: f.write(codePobjects)
f.write('\tif (!%s(result, "%s" %s))\n\t\treturn MAKE_PYCOM_GATEWAY_FAILURE_CODE("%s");\n' % (parseFn, formatChars, argsParseTuple, method.name))
if codePost:
f.write('\tBOOL bPythonIsHappy = TRUE;\n')
f.write(codePost)
f.write('\tif (!bPythonIsHappy) hr = MAKE_PYCOM_GATEWAY_FAILURE_CODE("%s");\n' % method.name)
f.write('\tPy_DECREF(result);\n');
f.write('\treturn hr;\n}\n\n')
def test():
# make_framework_support("d:\\msdev\\include\\objidl.h", "ILockBytes")
make_framework_support("d:\\msdev\\include\\objidl.h", "IStorage")
# make_framework_support("d:\\msdev\\include\\objidl.h", "IEnumSTATSTG")
| gpl-3.0 |
Microsoft/PTVS | Python/Templates/Django/ProjectTemplates/Python/Web/DjangoProject/settings.py | 4 | 3444 | """
Django settings for $safeprojectname$ project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import posixpath
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$guid2$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application references
# https://docs.djangoproject.com/en/2.1/ref/settings/#std:setting-INSTALLED_APPS
INSTALLED_APPS = [
# Add your apps here to enable them
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
# Middleware framework
# https://docs.djangoproject.com/en/2.1/topics/http/middleware/
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '$safeprojectname$.urls'
# Template configuration
# https://docs.djangoproject.com/en/2.1/topics/templates/
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '$safeprojectname$.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = posixpath.join(*(BASE_DIR.split(os.path.sep) + ['static']))
| apache-2.0 |
cypod/arsenalsuite | cpp/lib/PyQt4/examples/dialogs/findfiles.py | 20 | 7982 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
class Window(QtGui.QDialog):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
browseButton = self.createButton("&Browse...", self.browse)
findButton = self.createButton("&Find", self.find)
self.fileComboBox = self.createComboBox("*")
self.textComboBox = self.createComboBox()
self.directoryComboBox = self.createComboBox(QtCore.QDir.currentPath())
fileLabel = QtGui.QLabel("Named:")
textLabel = QtGui.QLabel("Containing text:")
directoryLabel = QtGui.QLabel("In directory:")
self.filesFoundLabel = QtGui.QLabel()
self.createFilesTable()
buttonsLayout = QtGui.QHBoxLayout()
buttonsLayout.addStretch()
buttonsLayout.addWidget(findButton)
mainLayout = QtGui.QGridLayout()
mainLayout.addWidget(fileLabel, 0, 0)
mainLayout.addWidget(self.fileComboBox, 0, 1, 1, 2)
mainLayout.addWidget(textLabel, 1, 0)
mainLayout.addWidget(self.textComboBox, 1, 1, 1, 2)
mainLayout.addWidget(directoryLabel, 2, 0)
mainLayout.addWidget(self.directoryComboBox, 2, 1)
mainLayout.addWidget(browseButton, 2, 2)
mainLayout.addWidget(self.filesTable, 3, 0, 1, 3)
mainLayout.addWidget(self.filesFoundLabel, 4, 0)
mainLayout.addLayout(buttonsLayout, 5, 0, 1, 3)
self.setLayout(mainLayout)
self.setWindowTitle("Find Files")
self.resize(700, 300)
def browse(self):
directory = QtGui.QFileDialog.getExistingDirectory(self, "Find Files",
QtCore.QDir.currentPath())
if directory:
if self.directoryComboBox.findText(directory) == -1:
self.directoryComboBox.addItem(directory)
self.directoryComboBox.setCurrentIndex(self.directoryComboBox.findText(directory))
@staticmethod
def updateComboBox(comboBox):
if comboBox.findText(comboBox.currentText()) == -1:
comboBox.addItem(comboBox.currentText())
def find(self):
self.filesTable.setRowCount(0)
fileName = self.fileComboBox.currentText()
text = self.textComboBox.currentText()
path = self.directoryComboBox.currentText()
self.updateComboBox(self.fileComboBox)
self.updateComboBox(self.textComboBox)
self.updateComboBox(self.directoryComboBox)
self.currentDir = QtCore.QDir(path)
if not fileName:
fileName = "*"
files = self.currentDir.entryList([fileName],
QtCore.QDir.Files | QtCore.QDir.NoSymLinks)
if text:
files = self.findFiles(files, text)
self.showFiles(files)
def findFiles(self, files, text):
progressDialog = QtGui.QProgressDialog(self)
progressDialog.setCancelButtonText("&Cancel")
progressDialog.setRange(0, files.count())
progressDialog.setWindowTitle("Find Files")
foundFiles = []
for i in range(files.count()):
progressDialog.setValue(i)
            progressDialog.setLabelText("Searching file number %d of %d..." % (i + 1, files.count()))
QtGui.qApp.processEvents()
if progressDialog.wasCanceled():
break
inFile = QtCore.QFile(self.currentDir.absoluteFilePath(files[i]))
if inFile.open(QtCore.QIODevice.ReadOnly):
stream = QtCore.QTextStream(inFile)
while not stream.atEnd():
if progressDialog.wasCanceled():
break
line = stream.readLine()
if text in line:
foundFiles.append(files[i])
break
progressDialog.close()
return foundFiles
def showFiles(self, files):
for fn in files:
file = QtCore.QFile(self.currentDir.absoluteFilePath(fn))
size = QtCore.QFileInfo(file).size()
fileNameItem = QtGui.QTableWidgetItem(fn)
fileNameItem.setFlags(fileNameItem.flags() ^ QtCore.Qt.ItemIsEditable)
sizeItem = QtGui.QTableWidgetItem("%d KB" % (int((size + 1023) / 1024)))
sizeItem.setTextAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignRight)
sizeItem.setFlags(sizeItem.flags() ^ QtCore.Qt.ItemIsEditable)
row = self.filesTable.rowCount()
self.filesTable.insertRow(row)
self.filesTable.setItem(row, 0, fileNameItem)
self.filesTable.setItem(row, 1, sizeItem)
self.filesFoundLabel.setText("%d file(s) found (Double click on a file to open it)" % len(files))
def createButton(self, text, member):
button = QtGui.QPushButton(text)
button.clicked.connect(member)
return button
def createComboBox(self, text=""):
comboBox = QtGui.QComboBox()
comboBox.setEditable(True)
comboBox.addItem(text)
comboBox.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Preferred)
return comboBox
def createFilesTable(self):
self.filesTable = QtGui.QTableWidget(0, 2)
self.filesTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.filesTable.setHorizontalHeaderLabels(("File Name", "Size"))
self.filesTable.horizontalHeader().setResizeMode(0, QtGui.QHeaderView.Stretch)
self.filesTable.verticalHeader().hide()
self.filesTable.setShowGrid(False)
self.filesTable.cellActivated.connect(self.openFileOfItem)
def openFileOfItem(self, row, column):
item = self.filesTable.item(row, 0)
QtGui.QDesktopServices.openUrl(QtCore.QUrl(self.currentDir.absoluteFilePath(item.text())))
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
| gpl-2.0 |
gadybadger/mixpanel-celery | mixpanel/conf/settings.py | 1 | 2592 | """Default configuration values and documentation"""
from django.conf import settings
"""
.. data:: MIXPANEL_API_TOKEN
    API token for your Mixpanel account. This determines the Mixpanel account
    that receives all tracked events.
You can find this on the ``API Information`` tab on your
`mixpanel account page`_
.. _`mixpanel account page`: http://mixpanel.com/user/account/
"""
MIXPANEL_API_TOKEN = getattr(settings, 'MIXPANEL_API_TOKEN', None)
"""
.. data:: MIXPANEL_RETRY_DELAY
Number of seconds to wait before retrying an event-tracking request that
failed because of an invalid server response. These failed responses are
usually 502's or 504's because Mixpanel is under increased load.
Defaults to 5 minutes.
"""
MIXPANEL_RETRY_DELAY = getattr(settings, 'MIXPANEL_RETRY_DELAY', 60*5)
"""
.. data:: MIXPANEL_MAX_RETRIES
Number of retry attempts to make before raising an exception.
Defaults to 5 attempts.
"""
MIXPANEL_MAX_RETRIES = getattr(settings, 'MIXPANEL_MAX_RETRIES', 5)
"""
.. data:: MIXPANEL_API_TIMEOUT
    Number of seconds to wait before timing out a request to the Mixpanel API
    server. The default 30-second timeout can cause your job queue to become
    swamped.
Defaults to 5 seconds.
"""
MIXPANEL_API_TIMEOUT = getattr(settings, 'MIXPANEL_API_TIMEOUT', 5)
"""
.. data:: MIXPANEL_API_SERVER
URL for the mixpanel api server. This probably shouldn't change.
"""
MIXPANEL_API_SERVER = getattr(settings, 'MIXPANEL_API_SERVER',
'api.mixpanel.com')
"""
.. data:: MIXPANEL_TRACKING_ENDPOINT
    URL endpoint for registering events. Defaults to ``/track/``.
Mind the slashes.
"""
MIXPANEL_TRACKING_ENDPOINT = getattr(settings, 'MIXPANEL_TRACKING_ENDPOINT',
'/track/')
"""
.. data:: MIXPANEL_PEOPLE_TRACKING_ENDPOINT
    URL endpoint for registering people data. Defaults to ``/engage/``.
Mind the slashes.
"""
MIXPANEL_PEOPLE_TRACKING_ENDPOINT = getattr(settings, 'MIXPANEL_PEOPLE_TRACKING_ENDPOINT',
'/engage/')
"""
.. data:: MIXPANEL_DATA_VARIABLE
Name of the http GET variable used for transferring property information
when registering events.
"""
MIXPANEL_DATA_VARIABLE = getattr(settings, 'MIXPANEL_DATA_VARIABLE',
'data')
"""
.. data:: MIXPANEL_FUNNEL_EVENT_ID
The event identifier that indicates that a funnel is being tracked and not
just a normal event.
"""
MIXPANEL_FUNNEL_EVENT_ID = getattr(settings, 'MIXPANEL_FUNNEL_EVENT_ID',
'mp_funnel')
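"""
Example override in a project's ``settings.py`` (an illustrative sketch; the
token value below is a placeholder, not a real credential)::

    MIXPANEL_API_TOKEN = 'your-mixpanel-api-token'
    MIXPANEL_RETRY_DELAY = 60 * 10  # wait ten minutes between retries
    MIXPANEL_MAX_RETRIES = 3
"""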
| bsd-3-clause |
gsehub/edx-platform | common/djangoapps/student/tests/test_reset_password.py | 8 | 17950 | """
Test the various password reset flows
"""
import json
import re
import unittest
import ddt
from django.conf import settings
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core.cache import cache
from django.core import mail
from django.urls import reverse
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.http import int_to_base36
from edx_oauth2_provider.tests.factories import AccessTokenFactory, ClientFactory, RefreshTokenFactory
from mock import Mock, patch
from oauth2_provider import models as dot_models
from provider.oauth2 import models as dop_models
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api.models import UserRetirementRequest
from openedx.core.djangoapps.user_api.config.waffle import PREVENT_AUTH_USER_WRITES, SYSTEM_MAINTENANCE_MSG, waffle
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student.tests.test_email import mock_render_to_string
from student.views import SETTING_CHANGE_INITIATED, password_reset, password_reset_confirm_wrapper
from util.testing import EventTestMixin
from .test_configuration_overrides import fake_get_value
@unittest.skipUnless(
settings.ROOT_URLCONF == "lms.urls",
"reset password tests should only run in LMS"
)
@ddt.ddt
class ResetPasswordTests(EventTestMixin, CacheIsolationTestCase):
"""
    Tests that clicking reset password sends an email, and doesn't activate the user
"""
request_factory = RequestFactory()
ENABLED_CACHES = ['default']
def setUp(self):
super(ResetPasswordTests, self).setUp('student.views.management.tracker')
self.user = UserFactory.create()
self.user.is_active = False
self.user.save()
self.token = default_token_generator.make_token(self.user)
self.uidb36 = int_to_base36(self.user.id)
self.user_bad_passwd = UserFactory.create()
self.user_bad_passwd.is_active = False
self.user_bad_passwd.password = UNUSABLE_PASSWORD_PREFIX
self.user_bad_passwd.save()
@patch('student.views.management.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_user_bad_password_reset(self):
"""
        Tests password reset behavior for a user whose password is marked UNUSABLE_PASSWORD_PREFIX
"""
bad_pwd_req = self.request_factory.post('/password_reset/', {'email': self.user_bad_passwd.email})
bad_pwd_resp = password_reset(bad_pwd_req)
# If they've got an unusable password, we return a successful response code
self.assertEquals(bad_pwd_resp.status_code, 200)
obj = json.loads(bad_pwd_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.management.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_nonexist_email_password_reset(self):
"""
        Now test the exception cases of reset_password called with an invalid email.
"""
bad_email_req = self.request_factory.post('/password_reset/', {'email': self.user.email + "makeItFail"})
bad_email_resp = password_reset(bad_email_req)
        # Note: even if the email is bad, we return a successful response code.
        # This prevents someone from trying to "brute-force" discover which
        # emails are and aren't registered with edX.
self.assertEquals(bad_email_resp.status_code, 200)
obj = json.loads(bad_email_resp.content)
self.assertEquals(obj, {
'success': True,
'value': "('registration/password_reset_done.html', [])",
})
self.assert_no_events_were_emitted()
@patch('student.views.management.render_to_string', Mock(side_effect=mock_render_to_string, autospec=True))
def test_password_reset_ratelimited(self):
"""
        Try (and fail) resetting the password 30 times in a row on a non-existent email address
"""
cache.clear()
for i in xrange(30):
good_req = self.request_factory.post('/password_reset/', {
'email': 'thisdoesnotexist{0}@foo.com'.format(i)
})
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
        # then the rate limiter should kick in and give an HttpForbidden response
bad_req = self.request_factory.post('/password_reset/', {'email': '[email protected]'})
bad_resp = password_reset(bad_req)
self.assertEquals(bad_resp.status_code, 403)
self.assert_no_events_were_emitted()
cache.clear()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@ddt.data('plain_text', 'html')
def test_reset_password_email(self, body_type):
"""Tests contents of reset password email, and that user is not active"""
good_req = self.request_factory.post('/password_reset/', {'email': self.user.email})
good_req.user = self.user
good_req.site = Mock(domain='example.com')
dop_client = ClientFactory()
dop_access_token = AccessTokenFactory(user=self.user, client=dop_client)
RefreshTokenFactory(user=self.user, client=dop_client, access_token=dop_access_token)
dot_application = dot_factories.ApplicationFactory(user=self.user)
dot_access_token = dot_factories.AccessTokenFactory(user=self.user, application=dot_application)
dot_factories.RefreshTokenFactory(user=self.user, application=dot_application, access_token=dot_access_token)
good_resp = password_reset(good_req)
self.assertEquals(good_resp.status_code, 200)
self.assertFalse(dop_models.AccessToken.objects.filter(user=self.user).exists())
self.assertFalse(dop_models.RefreshToken.objects.filter(user=self.user).exists())
self.assertFalse(dot_models.AccessToken.objects.filter(user=self.user).exists())
self.assertFalse(dot_models.RefreshToken.objects.filter(user=self.user).exists())
obj = json.loads(good_resp.content)
self.assertTrue(obj['success'])
self.assertIn('e-mailed you instructions for setting your password', obj['value'])
from_email = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
sent_message = mail.outbox[0]
bodies = {
'plain_text': sent_message.body,
'html': sent_message.alternatives[0][0],
}
body = bodies[body_type]
self.assertIn("Password reset", sent_message.subject)
self.assertIn("You're receiving this e-mail because you requested a password reset", body)
self.assertEquals(sent_message.from_email, from_email)
self.assertEquals(len(sent_message.to), 1)
self.assertIn(self.user.email, sent_message.to)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None,
)
# Test that the user is not active
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
self.assertIn('password_reset_confirm/', body)
re.search(r'password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/', body).groupdict()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data((False, 'http://'), (True, 'https://'))
@ddt.unpack
def test_reset_password_email_https(self, is_secure, protocol, send_email):
"""
        Tests that the right URL protocol is included in the reset password link
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.site = Mock(domain='example.com')
req.is_secure = Mock(return_value=is_secure)
req.user = self.user
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
expected_msg = "Please go to the following page and choose a new password:\n\n" + protocol
self.assertIn(expected_msg, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data(('Crazy Awesome Site', 'Crazy Awesome Site'), ('edX', 'edX'))
@ddt.unpack
def test_reset_password_email_site(self, site_name, platform_name, send_email):
"""
        Tests that the right URL domain and platform name are included in
        the reset password email
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
with patch("django.conf.settings.SITE_NAME", site_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.user = self.user
req.site = Mock(domain='example.com')
password_reset(req)
_, msg, _, _ = send_email.call_args[0]
reset_msg = "you requested a password reset for your user account at {}"
reset_msg = reset_msg.format(site_name)
self.assertIn(reset_msg, msg)
sign_off = "The {} Team".format(platform_name)
self.assertIn(sign_off, msg)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value)
@ddt.data('plain_text', 'html')
def test_reset_password_email_configuration_override(self, body_type):
"""
        Tests that the right URL domain and platform name are included in
        the reset password email
"""
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.get_host = Mock(return_value=None)
req.site = Mock(domain='example.com')
req.user = self.user
with patch('crum.get_current_request', return_value=req):
password_reset(req)
sent_message = mail.outbox[0]
bodies = {
'plain_text': sent_message.body,
'html': sent_message.alternatives[0][0],
}
body = bodies[body_type]
reset_msg = "you requested a password reset for your user account at {}".format(fake_get_value('PLATFORM_NAME'))
self.assertIn(reset_msg, body)
self.assert_event_emitted(
SETTING_CHANGE_INITIATED, user_id=self.user.id, setting=u'password', old=None, new=None
)
self.assertEqual(sent_message.from_email, "[email protected]")
@ddt.data(
('invalidUid', 'invalid_token'),
(None, 'invalid_token'),
('invalidUid', None),
)
@ddt.unpack
def test_reset_password_bad_token(self, uidb36, token):
"""
Tests bad token and uidb36 in password reset
"""
if uidb36 is None:
uidb36 = self.uidb36
if token is None:
token = self.token
bad_request = self.request_factory.get(
reverse(
"password_reset_confirm",
kwargs={"uidb36": uidb36, "token": token}
)
)
password_reset_confirm_wrapper(bad_request, uidb36, token)
self.user = User.objects.get(pk=self.user.pk)
self.assertFalse(self.user.is_active)
def test_reset_password_good_token(self):
"""
Tests good token and uidb36 in password reset
"""
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
good_reset_req = self.request_factory.get(url)
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
def test_password_reset_fail(self):
"""
        Tests that if we provide mismatched passwords, the user is not marked as active.
"""
self.assertFalse(self.user.is_active)
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
request_params = {'new_password1': 'password1', 'new_password2': 'password2'}
confirm_request = self.request_factory.post(url, data=request_params)
# Make a password reset request with mismatching passwords.
resp = password_reset_confirm_wrapper(confirm_request, self.uidb36, self.token)
        # Verify the response status code is 200 (password reset failed) and also
        # verify that the user is not marked as active.
self.assertEqual(resp.status_code, 200)
self.assertFalse(User.objects.get(pk=self.user.pk).is_active)
def test_password_reset_retired_user_fail(self):
"""
Tests that if a retired user attempts to reset their password, it fails.
"""
self.assertFalse(self.user.is_active)
# Retire the user.
UserRetirementRequest.create_retirement_request(self.user)
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
reset_req = self.request_factory.get(url)
resp = password_reset_confirm_wrapper(reset_req, self.uidb36, self.token)
        # Verify the response status code is 200 (password reset failed) and also
        # verify that the user is not marked as active.
self.assertEqual(resp.status_code, 200)
self.assertFalse(User.objects.get(pk=self.user.pk).is_active)
def test_password_reset_prevent_auth_user_writes(self):
with waffle().override(PREVENT_AUTH_USER_WRITES, True):
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
for request in [self.request_factory.get(url), self.request_factory.post(url)]:
response = password_reset_confirm_wrapper(request, self.uidb36, self.token)
assert response.context_data['err_msg'] == SYSTEM_MAINTENANCE_MSG
self.user.refresh_from_db()
assert not self.user.is_active
@override_settings(PASSWORD_MIN_LENGTH=2)
@override_settings(PASSWORD_MAX_LENGTH=10)
@ddt.data(
{
'password': '1',
'error_message': 'Enter a password with at least 2 characters.',
},
{
'password': '01234567891',
'error_message': 'Enter a password with at most 10 characters.',
}
)
def test_password_reset_with_invalid_length(self, password_dict):
"""
        Tests that if we provide fewer password characters than PASSWORD_MIN_LENGTH,
        or more than PASSWORD_MAX_LENGTH, password reset will fail with an error message.
"""
url = reverse(
'password_reset_confirm',
kwargs={'uidb36': self.uidb36, 'token': self.token}
)
request_params = {'new_password1': password_dict['password'], 'new_password2': password_dict['password']}
confirm_request = self.request_factory.post(url, data=request_params)
        # Make a password reset request with a password below/above the length limits.
response = password_reset_confirm_wrapper(confirm_request, self.uidb36, self.token)
self.assertEqual(response.context_data['err_msg'], password_dict['error_message'])
@patch('student.views.management.password_reset_confirm')
@patch("openedx.core.djangoapps.site_configuration.helpers.get_value", fake_get_value)
def test_reset_password_good_token_configuration_override(self, reset_confirm):
"""
Tests password reset confirmation page for site configuration override.
"""
url = reverse(
"password_reset_confirm",
kwargs={"uidb36": self.uidb36, "token": self.token}
)
good_reset_req = self.request_factory.get(url)
password_reset_confirm_wrapper(good_reset_req, self.uidb36, self.token)
confirm_kwargs = reset_confirm.call_args[1]
self.assertEquals(confirm_kwargs['extra_context']['platform_name'], 'Fake University')
self.user = User.objects.get(pk=self.user.pk)
self.assertTrue(self.user.is_active)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', "Test only valid in LMS")
@patch('django.core.mail.send_mail')
@ddt.data('Crazy Awesome Site', 'edX')
def test_reset_password_email_subject(self, platform_name, send_email):
"""
Tests that the right platform name is included in
the reset password email subject
"""
with patch("django.conf.settings.PLATFORM_NAME", platform_name):
req = self.request_factory.post(
'/password_reset/', {'email': self.user.email}
)
req.user = self.user
req.site = Mock(domain='example.com')
password_reset(req)
subj, _, _, _ = send_email.call_args[0]
self.assertIn(platform_name, subj)
| agpl-3.0 |
mxamin/youtube-dl | youtube_dl/extractor/fivemin.py | 79 | 1917 | from __future__ import unicode_literals
from .common import InfoExtractor
class FiveMinIE(InfoExtractor):
IE_NAME = '5min'
_VALID_URL = r'(?:5min:|https?://(?:[^/]*?5min\.com/|delivery\.vidible\.tv/aol)(?:(?:Scripts/PlayerSeed\.js|playerseed/?)?\?.*?playList=)?)(?P<id>\d+)'
_TESTS = [
{
# From http://www.engadget.com/2013/11/15/ipad-mini-retina-display-review/
'url': 'http://pshared.5min.com/Scripts/PlayerSeed.js?sid=281&width=560&height=345&playList=518013791',
'md5': '4f7b0b79bf1a470e5004f7112385941d',
'info_dict': {
'id': '518013791',
'ext': 'mp4',
'title': 'iPad Mini with Retina Display Review',
'description': 'iPad mini with Retina Display review',
'duration': 177,
'uploader': 'engadget',
'upload_date': '20131115',
'timestamp': 1384515288,
},
'params': {
# m3u8 download
'skip_download': True,
}
},
{
# From http://on.aol.com/video/how-to-make-a-next-level-fruit-salad-518086247
'url': '5min:518086247',
'md5': 'e539a9dd682c288ef5a498898009f69e',
'info_dict': {
'id': '518086247',
'ext': 'mp4',
'title': 'How to Make a Next-Level Fruit Salad',
'duration': 184,
},
'skip': 'no longer available',
},
{
'url': 'http://embed.5min.com/518726732/',
'only_matching': True,
},
{
'url': 'http://delivery.vidible.tv/aol?playList=518013791',
'only_matching': True,
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result('aol-video:%s' % video_id)
| unlicense |
thomasrogers03/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/xml.py | 187 | 2044 | # Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Checks WebKit style for XML files."""
from __future__ import absolute_import
from xml.parsers import expat
class XMLChecker(object):
"""Processes XML lines for checking style."""
def __init__(self, file_path, handle_style_error):
self._handle_style_error = handle_style_error
self._handle_style_error.turn_off_line_filtering()
def check(self, lines):
parser = expat.ParserCreate()
try:
for line in lines:
parser.Parse(line)
parser.Parse('\n')
parser.Parse('', True)
except expat.ExpatError, error:
self._handle_style_error(error.lineno, 'xml/syntax', 5, expat.ErrorString(error.code))
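# Minimal usage sketch (illustrative; the handler class below is hypothetical --
# in webkitpy the handler is supplied by the style-checking framework and must
# expose turn_off_line_filtering(), as used in __init__ above):
#
#   class _PrintingErrorHandler(object):
#       def turn_off_line_filtering(self):
#           pass
#       def __call__(self, line_number, category, confidence, message):
#           print '%d: [%s] %s' % (line_number, category, message)
#
#   checker = XMLChecker('example.xml', _PrintingErrorHandler())
#   checker.check(['<root>', '<child>', '</root>'])  # reports the mismatched tag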
| bsd-3-clause |
Maksimall89/houmpack | lcdmy.py | 1 | 4270 | #!/usr/bin/python
# --------------------------------------
# ___ ___ _ ____
# / _ \/ _ \(_) __/__ __ __
# / , _/ ___/ /\ \/ _ \/ // /
# /_/|_/_/ /_/___/ .__/\_, /
# /_/ /___/
#
# lcdmy.py
# 20x4 LCD Test Script with
# backlight control and text justification
#
# Author : Matt Hawkins
# Date : 06/04/2015
#
# http://www.raspberrypi-spy.co.uk/
#
# --------------------------------------
# The wiring for the LCD is as follows:
# 1 : GND
# 2 : 5V
# 3 : Contrast (0-5V)*
# 4 : RS (Register Select)
# 5 : R/W (Read Write) - GROUND THIS PIN
# 6 : Enable or Strobe
# 7 : Data Bit 0 - NOT USED
# 8 : Data Bit 1 - NOT USED
# 9 : Data Bit 2 - NOT USED
# 10: Data Bit 3 - NOT USED
# 11: Data Bit 4
# 12: Data Bit 5
# 13: Data Bit 6
# 14: Data Bit 7
# 15: LCD Backlight +5V**
# 16: LCD Backlight GND
# import
import RPi.GPIO as GPIO
import time
# Define GPIO to LCD mapping
LCD_RS = 7
LCD_E = 8
LCD_D4 = 25
LCD_D5 = 24
LCD_D6 = 23
LCD_D7 = 18
LED_ON = 15
# Define some device constants
LCD_WIDTH = 20 # Maximum characters per line
LCD_CHR = True  # Mode - sending data
LCD_CMD = False  # Mode - sending command
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
LCD_LINE_3 = 0x94 # LCD RAM address for the 3rd line
LCD_LINE_4 = 0xD4 # LCD RAM address for the 4th line
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
def work(message, line, style):
    if line == 1:
        string(message, LCD_LINE_1, style)
    elif line == 2:
        string(message, LCD_LINE_2, style)
    elif line == 3:
        string(message, LCD_LINE_3, style)
    elif line == 4:
        string(message, LCD_LINE_4, style)
def init():
# Main program block
GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
GPIO.setup(LCD_E, GPIO.OUT) # E
GPIO.setup(LCD_RS, GPIO.OUT) # RS
GPIO.setup(LCD_D4, GPIO.OUT) # DB4
GPIO.setup(LCD_D5, GPIO.OUT) # DB5
GPIO.setup(LCD_D6, GPIO.OUT) # DB6
GPIO.setup(LCD_D7, GPIO.OUT) # DB7
GPIO.setup(LED_ON, GPIO.OUT) # Backlight enable
# Initialise display
byte(0x33, LCD_CMD) # 110011 Initialise
byte(0x32, LCD_CMD) # 110010 Initialise
byte(0x06, LCD_CMD) # 000110 Cursor move direction
byte(0x0C, LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
byte(0x28, LCD_CMD) # 101000 Data length, number of lines, font size
byte(0x01, LCD_CMD) # 000001 Clear display
time.sleep(E_DELAY)
def byte(bits, mode):
# Send byte to data pins
# bits = data
# mode = True for character
# False for command
GPIO.output(LCD_RS, mode) # RS
# High bits
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits & 0x10 == 0x10:
GPIO.output(LCD_D4, True)
if bits & 0x20 == 0x20:
GPIO.output(LCD_D5, True)
if bits & 0x40 == 0x40:
GPIO.output(LCD_D6, True)
if bits & 0x80 == 0x80:
GPIO.output(LCD_D7, True)
# Toggle 'Enable' pin
toggle_enable()
# Low bits
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits & 0x01 == 0x01:
GPIO.output(LCD_D4, True)
if bits & 0x02 == 0x02:
GPIO.output(LCD_D5, True)
if bits & 0x04 == 0x04:
GPIO.output(LCD_D6, True)
if bits & 0x08 == 0x08:
GPIO.output(LCD_D7, True)
# Toggle 'Enable' pin
toggle_enable()
def toggle_enable():
# Toggle enable
time.sleep(E_DELAY)
GPIO.output(LCD_E, True)
time.sleep(E_PULSE)
GPIO.output(LCD_E, False)
time.sleep(E_DELAY)
def string(message, line, style):
# Send string to display
    # style="left"   Left justified
    # style="center" Centred
    # style="right"  Right justified (anything else defaults to left)
if style == "left":
message = message.ljust(LCD_WIDTH, " ")
elif style == "center":
message = message.center(LCD_WIDTH, " ")
elif style == "right":
message = message.rjust(LCD_WIDTH, " ")
else:
message = message.ljust(LCD_WIDTH, " ")
byte(line, LCD_CMD)
for i in range(LCD_WIDTH):
byte(ord(message[i]), LCD_CHR)
def clear():
# Blank display
byte(0x01, LCD_CMD)
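# Example usage (illustrative):
#
#   init()                               # configure GPIO and the display
#   work("Hello, world!", 1, "center")   # centred text on LCD line 1
#   time.sleep(3)
#   clear()
#   GPIO.cleanup()                       # release the GPIO pins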
| gpl-3.0 |
talbrecht/pism_pik06 | doc/site-packages/sphinxcontrib/bibtex/latex_codec.py | 2 | 36564 | # -*- coding: utf-8 -*-
"""
Character translation utilities for LaTeX-formatted text
========================================================
Usage:
- unicode(string,'latex')
- ustring.decode('latex')
are both available just by letting "import latex" find this file.
- unicode(string,'latex+latin1')
- ustring.decode('latex+latin1')
where latin1 can be replaced by any other known encoding, also
become available by calling latex.register().
Copyright (c) 2003, 2008 David Eppstein
Copyright (c) 2011 Matthias C. M. Troffaes
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import codecs
import collections
import re
from sphinxcontrib.bibtex import latex_lexer
def register():
"""Enable encodings of the form 'latex+x' where x describes another encoding.
Unicode characters are translated to or from x when possible, otherwise
expanded to latex.
"""
codecs.register(find_latex)
# returns the codec search function
# this is used if latex_codec.py were to be placed in stdlib
def getregentry():
"""Encodings module API."""
return find_latex('latex')
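# Quick usage sketch (illustrative; the exact byte output depends on the
# translation table built below):
#
#   register()
#   u'caf\N{LATIN SMALL LETTER E WITH ACUTE}'.encode('latex')  # e.g. b"caf\\'e"
#   b"caf\\'e".decode('latex')                                 # -> u'caf\xe9'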
class LatexUnicodeTable:
"""Tabulates a translation between latex and unicode."""
def __init__(self, lexer):
self.lexer = lexer
self.unicode_map = {}
self.max_length = 0
self.latex_map = {}
self.register_all()
def register_all(self):
# TODO complete this list
# register special symbols
self.register(u'\N{EN DASH}', b'--')
self.register(u'\N{EN DASH}', b'\\textendash')
self.register(u'\N{EM DASH}', b'---')
self.register(u'\N{EM DASH}', b'\\textemdash')
self.register(u'\N{LEFT SINGLE QUOTATION MARK}', b'`', decode=False)
self.register(u'\N{RIGHT SINGLE QUOTATION MARK}', b"'", decode=False)
self.register(u'\N{LEFT DOUBLE QUOTATION MARK}', b'``')
self.register(u'\N{RIGHT DOUBLE QUOTATION MARK}', b"''")
self.register(u'\N{DAGGER}', b'\\dag')
self.register(u'\N{DOUBLE DAGGER}', b'\\ddag')
self.register(u'\N{BULLET}', b'\\bullet', mode='math')
self.register(u'\N{BULLET}', b'\\textbullet', package='textcomp')
self.register(u'\N{NUMBER SIGN}', b'\\#')
self.register(u'\N{AMPERSAND}', b'\\&')
self.register(u'\N{NO-BREAK SPACE}', b'~')
self.register(u'\N{INVERTED EXCLAMATION MARK}', b'!`')
self.register(u'\N{CENT SIGN}', b'\\not{c}')
self.register(u'\N{POUND SIGN}', b'\\pounds')
self.register(u'\N{POUND SIGN}', b'\\textsterling', package='textcomp')
self.register(u'\N{SECTION SIGN}', b'\\S')
self.register(u'\N{DIAERESIS}', b'\\"{}')
self.register(u'\N{NOT SIGN}', b'\\neg')
self.register(u'\N{SOFT HYPHEN}', b'\\-')
self.register(u'\N{MACRON}', b'\\={}')
self.register(u'\N{DEGREE SIGN}', b'^\\circ', mode='math')
self.register(u'\N{DEGREE SIGN}', b'\\textdegree', package='textcomp')
self.register(u'\N{PLUS-MINUS SIGN}', b'\\pm', mode='math')
self.register(u'\N{PLUS-MINUS SIGN}', b'\\textpm', package='textcomp')
self.register(u'\N{SUPERSCRIPT TWO}', b'^2', mode='math')
self.register(u'\N{SUPERSCRIPT TWO}', b'\\texttwosuperior', package='textcomp')
self.register(u'\N{SUPERSCRIPT THREE}', b'^3', mode='math')
self.register(u'\N{SUPERSCRIPT THREE}', b'\\textthreesuperior', package='textcomp')
self.register(u'\N{ACUTE ACCENT}', b"\\'{}")
self.register(u'\N{MICRO SIGN}', b'\\mu', mode='math')
self.register(u'\N{MICRO SIGN}', b'\\micro', package='gensymb')
self.register(u'\N{PILCROW SIGN}', b'\\P')
self.register(u'\N{MIDDLE DOT}', b'\\cdot', mode='math')
self.register(u'\N{MIDDLE DOT}', b'\\textperiodcentered', package='textcomp')
self.register(u'\N{CEDILLA}', b'\\c{}')
self.register(u'\N{SUPERSCRIPT ONE}', b'^1', mode='math')
self.register(u'\N{SUPERSCRIPT ONE}', b'\\textonesuperior', package='textcomp')
self.register(u'\N{INVERTED QUESTION MARK}', b'?`')
self.register(u'\N{LATIN CAPITAL LETTER A WITH GRAVE}', b'\\`A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH CIRCUMFLEX}', b'\\^A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH TILDE}', b'\\~A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH DIAERESIS}', b'\\"A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH RING ABOVE}', b'\\AA')
self.register(u'\N{LATIN CAPITAL LETTER AE}', b'\\AE')
self.register(u'\N{LATIN CAPITAL LETTER C WITH CEDILLA}', b'\\c C')
self.register(u'\N{LATIN CAPITAL LETTER E WITH GRAVE}', b'\\`E')
self.register(u'\N{LATIN CAPITAL LETTER E WITH ACUTE}', b"\\'E")
self.register(u'\N{LATIN CAPITAL LETTER E WITH CIRCUMFLEX}', b'\\^E')
self.register(u'\N{LATIN CAPITAL LETTER E WITH DIAERESIS}', b'\\"E')
self.register(u'\N{LATIN CAPITAL LETTER I WITH GRAVE}', b'\\`I')
self.register(u'\N{LATIN CAPITAL LETTER I WITH CIRCUMFLEX}', b'\\^I')
self.register(u'\N{LATIN CAPITAL LETTER I WITH DIAERESIS}', b'\\"I')
self.register(u'\N{LATIN CAPITAL LETTER N WITH TILDE}', b'\\~N')
self.register(u'\N{LATIN CAPITAL LETTER O WITH GRAVE}', b'\\`O')
self.register(u'\N{LATIN CAPITAL LETTER O WITH ACUTE}', b"\\'O")
self.register(u'\N{LATIN CAPITAL LETTER O WITH CIRCUMFLEX}', b'\\^O')
self.register(u'\N{LATIN CAPITAL LETTER O WITH TILDE}', b'\\~O')
self.register(u'\N{LATIN CAPITAL LETTER O WITH DIAERESIS}', b'\\"O')
self.register(u'\N{MULTIPLICATION SIGN}', b'\\times', mode='math')
self.register(u'\N{LATIN CAPITAL LETTER O WITH STROKE}', b'\\O')
self.register(u'\N{LATIN CAPITAL LETTER U WITH GRAVE}', b'\\`U')
self.register(u'\N{LATIN CAPITAL LETTER U WITH ACUTE}', b"\\'U")
self.register(u'\N{LATIN CAPITAL LETTER U WITH CIRCUMFLEX}', b'\\^U')
self.register(u'\N{LATIN CAPITAL LETTER U WITH DIAERESIS}', b'\\"U')
self.register(u'\N{LATIN CAPITAL LETTER Y WITH ACUTE}', b"\\'Y")
self.register(u'\N{LATIN SMALL LETTER SHARP S}', b'\\ss')
self.register(u'\N{LATIN SMALL LETTER A WITH GRAVE}', b'\\`a')
self.register(u'\N{LATIN SMALL LETTER A WITH ACUTE}', b"\\'a")
self.register(u'\N{LATIN SMALL LETTER A WITH CIRCUMFLEX}', b'\\^a')
self.register(u'\N{LATIN SMALL LETTER A WITH TILDE}', b'\\~a')
self.register(u'\N{LATIN SMALL LETTER A WITH DIAERESIS}', b'\\"a')
self.register(u'\N{LATIN SMALL LETTER A WITH RING ABOVE}', b'\\aa')
self.register(u'\N{LATIN SMALL LETTER AE}', b'\\ae')
self.register(u'\N{LATIN SMALL LETTER C WITH CEDILLA}', b'\\c c')
self.register(u'\N{LATIN SMALL LETTER E WITH GRAVE}', b'\\`e')
self.register(u'\N{LATIN SMALL LETTER E WITH ACUTE}', b"\\'e")
self.register(u'\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}', b'\\^e')
self.register(u'\N{LATIN SMALL LETTER E WITH DIAERESIS}', b'\\"e')
self.register(u'\N{LATIN SMALL LETTER I WITH GRAVE}', b'\\`\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH GRAVE}', b'\\`i')
self.register(u'\N{LATIN SMALL LETTER I WITH ACUTE}', b"\\'\\i")
self.register(u'\N{LATIN SMALL LETTER I WITH ACUTE}', b"\\'i")
self.register(u'\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}', b'\\^\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}', b'\\^i')
self.register(u'\N{LATIN SMALL LETTER I WITH DIAERESIS}', b'\\"\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH DIAERESIS}', b'\\"i')
self.register(u'\N{LATIN SMALL LETTER N WITH TILDE}', b'\\~n')
self.register(u'\N{LATIN SMALL LETTER O WITH GRAVE}', b'\\`o')
self.register(u'\N{LATIN SMALL LETTER O WITH ACUTE}', b"\\'o")
self.register(u'\N{LATIN SMALL LETTER O WITH CIRCUMFLEX}', b'\\^o')
self.register(u'\N{LATIN SMALL LETTER O WITH TILDE}', b'\\~o')
self.register(u'\N{LATIN SMALL LETTER O WITH DIAERESIS}', b'\\"o')
self.register(u'\N{DIVISION SIGN}', b'\\div', mode='math')
self.register(u'\N{LATIN SMALL LETTER O WITH STROKE}', b'\\o')
self.register(u'\N{LATIN SMALL LETTER U WITH GRAVE}', b'\\`u')
self.register(u'\N{LATIN SMALL LETTER U WITH ACUTE}', b"\\'u")
self.register(u'\N{LATIN SMALL LETTER U WITH CIRCUMFLEX}', b'\\^u')
self.register(u'\N{LATIN SMALL LETTER U WITH DIAERESIS}', b'\\"u')
self.register(u'\N{LATIN SMALL LETTER Y WITH ACUTE}', b"\\'y")
self.register(u'\N{LATIN SMALL LETTER Y WITH DIAERESIS}', b'\\"y')
self.register(u'\N{LATIN CAPITAL LETTER A WITH MACRON}', b'\\=A')
self.register(u'\N{LATIN SMALL LETTER A WITH MACRON}', b'\\=a')
self.register(u'\N{LATIN CAPITAL LETTER A WITH BREVE}', b'\\u A')
self.register(u'\N{LATIN SMALL LETTER A WITH BREVE}', b'\\u a')
self.register(u'\N{LATIN CAPITAL LETTER A WITH OGONEK}', b'\\c A')
self.register(u'\N{LATIN SMALL LETTER A WITH OGONEK}', b'\\c a')
self.register(u'\N{LATIN CAPITAL LETTER C WITH ACUTE}', b"\\'C")
self.register(u'\N{LATIN SMALL LETTER C WITH ACUTE}', b"\\'c")
self.register(u'\N{LATIN CAPITAL LETTER C WITH CIRCUMFLEX}', b'\\^C')
self.register(u'\N{LATIN SMALL LETTER C WITH CIRCUMFLEX}', b'\\^c')
self.register(u'\N{LATIN CAPITAL LETTER C WITH DOT ABOVE}', b'\\.C')
self.register(u'\N{LATIN SMALL LETTER C WITH DOT ABOVE}', b'\\.c')
self.register(u'\N{LATIN CAPITAL LETTER C WITH CARON}', b'\\v C')
self.register(u'\N{LATIN SMALL LETTER C WITH CARON}', b'\\v c')
self.register(u'\N{LATIN CAPITAL LETTER D WITH CARON}', b'\\v D')
self.register(u'\N{LATIN SMALL LETTER D WITH CARON}', b'\\v d')
self.register(u'\N{LATIN CAPITAL LETTER E WITH MACRON}', b'\\=E')
self.register(u'\N{LATIN SMALL LETTER E WITH MACRON}', b'\\=e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH BREVE}', b'\\u E')
self.register(u'\N{LATIN SMALL LETTER E WITH BREVE}', b'\\u e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH DOT ABOVE}', b'\\.E')
self.register(u'\N{LATIN SMALL LETTER E WITH DOT ABOVE}', b'\\.e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH OGONEK}', b'\\c E')
self.register(u'\N{LATIN SMALL LETTER E WITH OGONEK}', b'\\c e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH CARON}', b'\\v E')
self.register(u'\N{LATIN SMALL LETTER E WITH CARON}', b'\\v e')
self.register(u'\N{LATIN CAPITAL LETTER G WITH CIRCUMFLEX}', b'\\^G')
self.register(u'\N{LATIN SMALL LETTER G WITH CIRCUMFLEX}', b'\\^g')
self.register(u'\N{LATIN CAPITAL LETTER G WITH BREVE}', b'\\u G')
self.register(u'\N{LATIN SMALL LETTER G WITH BREVE}', b'\\u g')
self.register(u'\N{LATIN CAPITAL LETTER G WITH DOT ABOVE}', b'\\.G')
self.register(u'\N{LATIN SMALL LETTER G WITH DOT ABOVE}', b'\\.g')
self.register(u'\N{LATIN CAPITAL LETTER G WITH CEDILLA}', b'\\c G')
self.register(u'\N{LATIN SMALL LETTER G WITH CEDILLA}', b'\\c g')
self.register(u'\N{LATIN CAPITAL LETTER H WITH CIRCUMFLEX}', b'\\^H')
self.register(u'\N{LATIN SMALL LETTER H WITH CIRCUMFLEX}', b'\\^h')
self.register(u'\N{LATIN CAPITAL LETTER I WITH TILDE}', b'\\~I')
self.register(u'\N{LATIN SMALL LETTER I WITH TILDE}', b'\\~\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH TILDE}', b'\\~i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH MACRON}', b'\\=I')
self.register(u'\N{LATIN SMALL LETTER I WITH MACRON}', b'\\=\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH MACRON}', b'\\=i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH BREVE}', b'\\u I')
self.register(u'\N{LATIN SMALL LETTER I WITH BREVE}', b'\\u\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH BREVE}', b'\\u i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH OGONEK}', b'\\c I')
self.register(u'\N{LATIN SMALL LETTER I WITH OGONEK}', b'\\c i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}', b'\\.I')
self.register(u'\N{LATIN SMALL LETTER DOTLESS I}', b'\\i')
self.register(u'\N{LATIN CAPITAL LIGATURE IJ}', b'IJ', decode=False)
self.register(u'\N{LATIN SMALL LIGATURE IJ}', b'ij', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER J WITH CIRCUMFLEX}', b'\\^J')
self.register(u'\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}', b'\\^\\j')
self.register(u'\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}', b'\\^j')
self.register(u'\N{LATIN CAPITAL LETTER K WITH CEDILLA}', b'\\c K')
self.register(u'\N{LATIN SMALL LETTER K WITH CEDILLA}', b'\\c k')
self.register(u'\N{LATIN CAPITAL LETTER L WITH ACUTE}', b"\\'L")
self.register(u'\N{LATIN SMALL LETTER L WITH ACUTE}', b"\\'l")
self.register(u'\N{LATIN CAPITAL LETTER L WITH CEDILLA}', b'\\c L')
self.register(u'\N{LATIN SMALL LETTER L WITH CEDILLA}', b'\\c l')
self.register(u'\N{LATIN CAPITAL LETTER L WITH CARON}', b'\\v L')
self.register(u'\N{LATIN SMALL LETTER L WITH CARON}', b'\\v l')
self.register(u'\N{LATIN CAPITAL LETTER L WITH STROKE}', b'\\L')
self.register(u'\N{LATIN SMALL LETTER L WITH STROKE}', b'\\l')
self.register(u'\N{LATIN CAPITAL LETTER N WITH ACUTE}', b"\\'N")
self.register(u'\N{LATIN SMALL LETTER N WITH ACUTE}', b"\\'n")
self.register(u'\N{LATIN CAPITAL LETTER N WITH CEDILLA}', b'\\c N')
self.register(u'\N{LATIN SMALL LETTER N WITH CEDILLA}', b'\\c n')
self.register(u'\N{LATIN CAPITAL LETTER N WITH CARON}', b'\\v N')
self.register(u'\N{LATIN SMALL LETTER N WITH CARON}', b'\\v n')
self.register(u'\N{LATIN CAPITAL LETTER O WITH MACRON}', b'\\=O')
self.register(u'\N{LATIN SMALL LETTER O WITH MACRON}', b'\\=o')
self.register(u'\N{LATIN CAPITAL LETTER O WITH BREVE}', b'\\u O')
self.register(u'\N{LATIN SMALL LETTER O WITH BREVE}', b'\\u o')
self.register(u'\N{LATIN CAPITAL LETTER O WITH DOUBLE ACUTE}', b'\\H O')
self.register(u'\N{LATIN SMALL LETTER O WITH DOUBLE ACUTE}', b'\\H o')
self.register(u'\N{LATIN CAPITAL LIGATURE OE}', b'\\OE')
self.register(u'\N{LATIN SMALL LIGATURE OE}', b'\\oe')
self.register(u'\N{LATIN CAPITAL LETTER R WITH ACUTE}', b"\\'R")
self.register(u'\N{LATIN SMALL LETTER R WITH ACUTE}', b"\\'r")
self.register(u'\N{LATIN CAPITAL LETTER R WITH CEDILLA}', b'\\c R')
self.register(u'\N{LATIN SMALL LETTER R WITH CEDILLA}', b'\\c r')
self.register(u'\N{LATIN CAPITAL LETTER R WITH CARON}', b'\\v R')
self.register(u'\N{LATIN SMALL LETTER R WITH CARON}', b'\\v r')
self.register(u'\N{LATIN CAPITAL LETTER S WITH ACUTE}', b"\\'S")
self.register(u'\N{LATIN SMALL LETTER S WITH ACUTE}', b"\\'s")
self.register(u'\N{LATIN CAPITAL LETTER S WITH CIRCUMFLEX}', b'\\^S')
self.register(u'\N{LATIN SMALL LETTER S WITH CIRCUMFLEX}', b'\\^s')
self.register(u'\N{LATIN CAPITAL LETTER S WITH CEDILLA}', b'\\c S')
self.register(u'\N{LATIN SMALL LETTER S WITH CEDILLA}', b'\\c s')
self.register(u'\N{LATIN CAPITAL LETTER S WITH CARON}', b'\\v S')
self.register(u'\N{LATIN SMALL LETTER S WITH CARON}', b'\\v s')
self.register(u'\N{LATIN CAPITAL LETTER T WITH CEDILLA}', b'\\c T')
self.register(u'\N{LATIN SMALL LETTER T WITH CEDILLA}', b'\\c t')
self.register(u'\N{LATIN CAPITAL LETTER T WITH CARON}', b'\\v T')
self.register(u'\N{LATIN SMALL LETTER T WITH CARON}', b'\\v t')
self.register(u'\N{LATIN CAPITAL LETTER U WITH TILDE}', b'\\~U')
self.register(u'\N{LATIN SMALL LETTER U WITH TILDE}', b'\\~u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH MACRON}', b'\\=U')
self.register(u'\N{LATIN SMALL LETTER U WITH MACRON}', b'\\=u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH BREVE}', b'\\u U')
self.register(u'\N{LATIN SMALL LETTER U WITH BREVE}', b'\\u u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH RING ABOVE}', b'\\r U')
self.register(u'\N{LATIN SMALL LETTER U WITH RING ABOVE}', b'\\r u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH DOUBLE ACUTE}', b'\\H U')
self.register(u'\N{LATIN SMALL LETTER U WITH DOUBLE ACUTE}', b'\\H u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH OGONEK}', b'\\c U')
self.register(u'\N{LATIN SMALL LETTER U WITH OGONEK}', b'\\c u')
self.register(u'\N{LATIN CAPITAL LETTER W WITH CIRCUMFLEX}', b'\\^W')
self.register(u'\N{LATIN SMALL LETTER W WITH CIRCUMFLEX}', b'\\^w')
self.register(u'\N{LATIN CAPITAL LETTER Y WITH CIRCUMFLEX}', b'\\^Y')
self.register(u'\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}', b'\\^y')
self.register(u'\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}', b'\\"Y')
self.register(u'\N{LATIN CAPITAL LETTER Z WITH ACUTE}', b"\\'Z")
        self.register(u'\N{LATIN SMALL LETTER Z WITH ACUTE}', b"\\'z")
self.register(u'\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}', b'\\.Z')
        self.register(u'\N{LATIN SMALL LETTER Z WITH DOT ABOVE}', b'\\.z')
self.register(u'\N{LATIN CAPITAL LETTER Z WITH CARON}', b'\\v Z')
self.register(u'\N{LATIN SMALL LETTER Z WITH CARON}', b'\\v z')
self.register(u'\N{LATIN CAPITAL LETTER DZ WITH CARON}', b'D\\v Z')
self.register(u'\N{LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON}', b'D\\v z')
self.register(u'\N{LATIN SMALL LETTER DZ WITH CARON}', b'd\\v z')
self.register(u'\N{LATIN CAPITAL LETTER LJ}', b'LJ', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER L WITH SMALL LETTER J}', b'Lj', decode=False)
self.register(u'\N{LATIN SMALL LETTER LJ}', b'lj', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER NJ}', b'NJ', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER N WITH SMALL LETTER J}', b'Nj', decode=False)
self.register(u'\N{LATIN SMALL LETTER NJ}', b'nj', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER A WITH CARON}', b'\\v A')
self.register(u'\N{LATIN SMALL LETTER A WITH CARON}', b'\\v a')
self.register(u'\N{LATIN CAPITAL LETTER I WITH CARON}', b'\\v I')
self.register(u'\N{LATIN SMALL LETTER I WITH CARON}', b'\\v\\i')
self.register(u'\N{LATIN CAPITAL LETTER O WITH CARON}', b'\\v O')
self.register(u'\N{LATIN SMALL LETTER O WITH CARON}', b'\\v o')
self.register(u'\N{LATIN CAPITAL LETTER U WITH CARON}', b'\\v U')
self.register(u'\N{LATIN SMALL LETTER U WITH CARON}', b'\\v u')
self.register(u'\N{LATIN CAPITAL LETTER G WITH CARON}', b'\\v G')
self.register(u'\N{LATIN SMALL LETTER G WITH CARON}', b'\\v g')
self.register(u'\N{LATIN CAPITAL LETTER K WITH CARON}', b'\\v K')
self.register(u'\N{LATIN SMALL LETTER K WITH CARON}', b'\\v k')
self.register(u'\N{LATIN CAPITAL LETTER O WITH OGONEK}', b'\\c O')
self.register(u'\N{LATIN SMALL LETTER O WITH OGONEK}', b'\\c o')
self.register(u'\N{LATIN SMALL LETTER J WITH CARON}', b'\\v\\j')
self.register(u'\N{LATIN CAPITAL LETTER DZ}', b'DZ')
self.register(u'\N{LATIN CAPITAL LETTER D WITH SMALL LETTER Z}', b'Dz', decode=False)
self.register(u'\N{LATIN SMALL LETTER DZ}', b'dz', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER G WITH ACUTE}', b"\\'G")
self.register(u'\N{LATIN SMALL LETTER G WITH ACUTE}', b"\\'g")
self.register(u'\N{LATIN CAPITAL LETTER AE WITH ACUTE}', b"\\'\\AE")
self.register(u'\N{LATIN SMALL LETTER AE WITH ACUTE}', b"\\'\\ae")
self.register(u'\N{LATIN CAPITAL LETTER O WITH STROKE AND ACUTE}', b"\\'\\O")
self.register(u'\N{LATIN SMALL LETTER O WITH STROKE AND ACUTE}', b"\\'\\o")
self.register(u'\N{PARTIAL DIFFERENTIAL}', b'\\partial', mode='math')
self.register(u'\N{N-ARY PRODUCT}', b'\\prod', mode='math')
self.register(u'\N{N-ARY SUMMATION}', b'\\sum', mode='math')
self.register(u'\N{SQUARE ROOT}', b'\\surd', mode='math')
self.register(u'\N{INFINITY}', b'\\infty', mode='math')
self.register(u'\N{INTEGRAL}', b'\\int', mode='math')
self.register(u'\N{INTERSECTION}', b'\\cap', mode='math')
self.register(u'\N{UNION}', b'\\cup', mode='math')
self.register(u'\N{RIGHTWARDS ARROW}', b'\\rightarrow', mode='math')
self.register(u'\N{RIGHTWARDS DOUBLE ARROW}', b'\\Rightarrow', mode='math')
self.register(u'\N{LEFTWARDS ARROW}', b'\\leftarrow', mode='math')
self.register(u'\N{LEFTWARDS DOUBLE ARROW}', b'\\Leftarrow', mode='math')
self.register(u'\N{LOGICAL OR}', b'\\vee', mode='math')
self.register(u'\N{LOGICAL AND}', b'\\wedge', mode='math')
self.register(u'\N{ALMOST EQUAL TO}', b'\\approx', mode='math')
self.register(u'\N{NOT EQUAL TO}', b'\\neq', mode='math')
self.register(u'\N{LESS-THAN OR EQUAL TO}', b'\\leq', mode='math')
self.register(u'\N{GREATER-THAN OR EQUAL TO}', b'\\geq', mode='math')
self.register(u'\N{MODIFIER LETTER CIRCUMFLEX ACCENT}', b'\\^{}')
self.register(u'\N{CARON}', b'\\v{}')
self.register(u'\N{BREVE}', b'\\u{}')
self.register(u'\N{DOT ABOVE}', b'\\.{}')
self.register(u'\N{RING ABOVE}', b'\\r{}')
self.register(u'\N{OGONEK}', b'\\c{}')
self.register(u'\N{SMALL TILDE}', b'\\~{}')
self.register(u'\N{DOUBLE ACUTE ACCENT}', b'\\H{}')
self.register(u'\N{LATIN SMALL LIGATURE FI}', b'fi', decode=False)
self.register(u'\N{LATIN SMALL LIGATURE FL}', b'fl', decode=False)
self.register(u'\N{LATIN SMALL LIGATURE FF}', b'ff', decode=False)
self.register(u'\N{GREEK SMALL LETTER ALPHA}', b'\\alpha', mode='math')
self.register(u'\N{GREEK SMALL LETTER BETA}', b'\\beta', mode='math')
self.register(u'\N{GREEK SMALL LETTER GAMMA}', b'\\gamma', mode='math')
self.register(u'\N{GREEK SMALL LETTER DELTA}', b'\\delta', mode='math')
self.register(u'\N{GREEK SMALL LETTER EPSILON}', b'\\epsilon', mode='math')
self.register(u'\N{GREEK SMALL LETTER ZETA}', b'\\zeta', mode='math')
self.register(u'\N{GREEK SMALL LETTER ETA}', b'\\eta', mode='math')
self.register(u'\N{GREEK SMALL LETTER THETA}', b'\\theta', mode='math')
self.register(u'\N{GREEK SMALL LETTER IOTA}', b'\\iota', mode='math')
self.register(u'\N{GREEK SMALL LETTER KAPPA}', b'\\kappa', mode='math')
self.register(u'\N{GREEK SMALL LETTER LAMDA}', b'\\lambda', mode='math') # LAMDA not LAMBDA
self.register(u'\N{GREEK SMALL LETTER MU}', b'\\mu', mode='math')
self.register(u'\N{GREEK SMALL LETTER NU}', b'\\nu', mode='math')
self.register(u'\N{GREEK SMALL LETTER XI}', b'\\xi', mode='math')
self.register(u'\N{GREEK SMALL LETTER OMICRON}', b'\\omicron', mode='math')
self.register(u'\N{GREEK SMALL LETTER PI}', b'\\pi', mode='math')
self.register(u'\N{GREEK SMALL LETTER RHO}', b'\\rho', mode='math')
self.register(u'\N{GREEK SMALL LETTER SIGMA}', b'\\sigma', mode='math')
self.register(u'\N{GREEK SMALL LETTER TAU}', b'\\tau', mode='math')
self.register(u'\N{GREEK SMALL LETTER UPSILON}', b'\\upsilon', mode='math')
self.register(u'\N{GREEK SMALL LETTER PHI}', b'\\phi', mode='math')
self.register(u'\N{GREEK SMALL LETTER CHI}', b'\\chi', mode='math')
self.register(u'\N{GREEK SMALL LETTER PSI}', b'\\psi', mode='math')
self.register(u'\N{GREEK SMALL LETTER OMEGA}', b'\\omega', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ALPHA}', b'\\Alpha', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER BETA}', b'\\Beta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER GAMMA}', b'\\Gamma', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER DELTA}', b'\\Delta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER EPSILON}', b'\\Epsilon', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ZETA}', b'\\Zeta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ETA}', b'\\Eta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER THETA}', b'\\Theta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER IOTA}', b'\\Iota', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER KAPPA}', b'\\Kappa', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER LAMDA}', b'\\Lambda', mode='math') # LAMDA not LAMBDA
self.register(u'\N{GREEK CAPITAL LETTER MU}', b'\\Mu', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER NU}', b'\\Nu', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER XI}', b'\\Xi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER OMICRON}', b'\\Omicron', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PI}', b'\\Pi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER RHO}', b'\\Rho', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER SIGMA}', b'\\Sigma', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER TAU}', b'\\Tau', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER UPSILON}', b'\\Upsilon', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PHI}', b'\\Phi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER CHI}', b'\\Chi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PSI}', b'\\Psi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER OMEGA}', b'\\Omega', mode='math')
self.register(u'\N{COPYRIGHT SIGN}', b'\\copyright')
self.register(u'\N{COPYRIGHT SIGN}', b'\\textcopyright')
self.register(u'\N{LATIN CAPITAL LETTER A WITH ACUTE}', b"\\'A")
self.register(u'\N{LATIN CAPITAL LETTER I WITH ACUTE}', b"\\'I")
self.register(u'\N{HORIZONTAL ELLIPSIS}', b'\\ldots')
self.register(u'\N{TRADE MARK SIGN}', b'^{TM}', mode='math')
self.register(u'\N{TRADE MARK SIGN}', b'\\texttrademark', package='textcomp')
def register(self, unicode_text, latex_text, mode='text', package=None,
decode=True, encode=True):
if package is not None:
# TODO implement packages
pass
if mode == 'math':
# also register text version
self.register(unicode_text, b'$' + latex_text + b'$', mode='text',
package=package, decode=decode, encode=encode)
# XXX for the time being, we do not perform in-math substitutions
return
# tokenize, and register unicode translation
tokens = tuple(self.lexer.get_tokens(latex_text, final=True))
if decode:
self.max_length = max(self.max_length, len(tokens))
            if tokens not in self.unicode_map:
self.unicode_map[tokens] = unicode_text
# also register token variant with brackets, if appropriate
# for instance, "\'{e}" for "\'e", "\c{c}" for "\c c", etc.
# note: we do not remove brackets (they sometimes matter,
# e.g. bibtex uses them to prevent lower case transformation)
if (len(tokens) == 2
and tokens[0].name.startswith('control')
and tokens[1].name == 'chars'):
alt_tokens = (
tokens[0], latex_lexer.Token('chars', b'{'),
tokens[1], latex_lexer.Token('chars', b'}'),
)
                if alt_tokens not in self.unicode_map:
self.unicode_map[alt_tokens] = u"{" + unicode_text + u"}"
if encode and unicode_text not in self.latex_map:
self.latex_map[unicode_text] = (latex_text, tokens)
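    # Illustrative example (values taken from the table above): register(
    # u'\xf1', b'\\~n') lets the encoder emit b'\\~n' for u'\xf1', and
    # teaches the decoder both the bare token form b'\\~n' -> u'\xf1' and
    # the bracketed variant b'\\~{n}' -> u'{\xf1}' (braces are kept, as
    # noted in the comment inside register).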
_LATEX_UNICODE_TABLE = LatexUnicodeTable(latex_lexer.LatexIncrementalDecoder())
# incremental encoder does not need a buffer
# but decoder does
class LatexIncrementalEncoder(latex_lexer.LatexIncrementalEncoder):
"""Translating incremental encoder for latex. Maintains a state to
determine whether control spaces etc. need to be inserted.
"""
table = _LATEX_UNICODE_TABLE
"""Translation table."""
def __init__(self, errors='strict'):
latex_lexer.LatexIncrementalEncoder.__init__(self, errors=errors)
self.reset()
def reset(self):
self.state = 'M'
def get_space_bytes(self, bytes_):
"""Inserts space bytes in space eating mode."""
if self.state == 'S':
# in space eating mode
# control space needed?
if bytes_.startswith(b' '):
# replace by control space
return b'\\ ', bytes_[1:]
else:
# insert space (it is eaten, but needed for separation)
return b' ', bytes_
else:
return b'', bytes_
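    # Example: after emitting a control word such as b'\\copyright' the
    # encoder is in space-eating state 'S'; a following b' ...' is emitted
    # with the control space b'\\ ' so the space survives, while any other
    # byte sequence gets a plain separator space prepended so the control
    # word does not swallow its first character.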
def get_latex_bytes(self, unicode_, final=False):
""":meth:`encode` calls this function to produce the final
sequence of latex bytes. This implementation simply
encodes every sequence in *inputenc* encoding. Override to
process the bytes in some other way (for example, for token
translation).
"""
if not isinstance(unicode_, basestring):
raise TypeError(
"expected unicode for encode input, but got {0} instead"
.format(unicode_.__class__.__name__))
# convert character by character
for pos, c in enumerate(unicode_):
# attempt input encoding first
# if this succeeds, then we don't need a latex representation
try:
bytes_ = c.encode(self.inputenc, 'strict')
except UnicodeEncodeError:
pass
else:
space, bytes_ = self.get_space_bytes(bytes_)
self.state = 'M'
if space:
yield space
yield bytes_
continue
# inputenc failed; let's try the latex equivalents
# of common unicode characters
try:
bytes_, tokens = self.table.latex_map[c]
except KeyError:
# translation failed
                if self.errors == 'strict':
raise UnicodeEncodeError(
"latex", # codec
unicode_, # problematic input
pos, pos + 1, # location of problematic character
"don't know how to translate {1} ({0}) into latex"
.format(c, repr(c)))
                elif self.errors == 'ignore':
pass
                elif self.errors == 'replace':
# use the \\char command
# this assumes
# \usepackage[T1]{fontenc}
# \usepackage[utf8]{inputenc}
yield b'{\\char'
yield str(ord(c)).encode("ascii")
yield b'}'
self.state = 'M'
else:
raise ValueError(
"latex codec does not support {0} errors"
                        .format(self.errors))
else:
# translation succeeded
space, bytes_ = self.get_space_bytes(bytes_)
# update state
if tokens[-1].name == 'control_word':
# we're eating spaces
self.state = 'S'
else:
self.state = 'M'
if space:
yield space
yield bytes_
class LatexIncrementalDecoder(latex_lexer.LatexIncrementalDecoder):
"""Translating incremental decoder for latex."""
table = _LATEX_UNICODE_TABLE
"""Translation table."""
    def __init__(self, errors='strict'):
        latex_lexer.LatexIncrementalDecoder.__init__(self)
        # store the error mode instead of silently dropping the parameter
        self.errors = errors
        self.max_length = 0
def reset(self):
latex_lexer.LatexIncrementalDecoder.reset(self)
self.token_buffer = []
# python codecs API does not support multibuffer incremental decoders
def getstate(self):
raise NotImplementedError
def setstate(self, state):
raise NotImplementedError
def get_unicode_tokens(self, bytes_, final=False):
for token in self.get_tokens(bytes_, final=final):
# at this point, token_buffer does not match anything
self.token_buffer.append(token)
# new token appended at the end, see if we have a match now
# note: match is only possible at the *end* of the buffer
# because all other positions have already been checked in
# earlier iterations
for i in xrange(1, len(self.token_buffer) + 1):
last_tokens = tuple(self.token_buffer[-i:]) # last i tokens
try:
unicode_text = self.table.unicode_map[last_tokens]
except KeyError:
# no match: continue
continue
else:
# match!! flush buffer, and translate last bit
for token in self.token_buffer[:-i]: # exclude last i tokens
yield token.decode(self.inputenc)
yield unicode_text
self.token_buffer = []
break
# flush tokens that can no longer match
while len(self.token_buffer) >= self.table.max_length:
yield self.token_buffer.pop(0).decode(self.inputenc)
# also flush the buffer at the end
if final:
for token in self.token_buffer:
yield token.decode(self.inputenc)
self.token_buffer = []
class LatexCodec(codecs.Codec):
IncrementalEncoder = None
IncrementalDecoder = None
def encode(self, unicode_, errors='strict'):
"""Convert unicode string to latex bytes."""
return (
self.IncrementalEncoder(errors=errors).encode(unicode_, final=True),
len(unicode_),
)
def decode(self, bytes_, errors='strict'):
"""Convert latex bytes to unicode string."""
return (
self.IncrementalDecoder(errors=errors).decode(bytes_, final=True),
len(bytes_),
)
def find_latex(encoding):
# check if requested codec info is for latex encoding
if not encoding.startswith('latex'):
return None
# set up all classes with correct latex input encoding
inputenc_ = encoding[6:] if encoding.startswith('latex+') else 'ascii'
class IncrementalEncoder_(LatexIncrementalEncoder):
inputenc = inputenc_
class IncrementalDecoder_(LatexIncrementalDecoder):
inputenc = inputenc_
class Codec(LatexCodec):
IncrementalEncoder = IncrementalEncoder_
IncrementalDecoder = IncrementalDecoder_
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return codecs.CodecInfo(
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder_,
incrementaldecoder=IncrementalDecoder_,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
codecs.register(find_latex)
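# Illustrative usage sketch (not part of the codec): once find_latex is
# registered above, the codec is reachable through the standard codecs API
# under the names "latex" and "latex+<inputenc>".  The expected outputs in
# the comments are assumptions based on the translation table, not tested
# results; running this directly requires latex_lexer to be importable.
if __name__ == '__main__':
    # u'\xf1' has no ascii encoding, so its latex equivalent is emitted:
    print(u'se\xf1or'.encode('latex'))       # expected: se\~nor
    # with a richer input encoding the character passes through as-is:
    print(u'se\xf1or'.encode('latex+utf8'))  # expected: utf-8 bytes for señor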
| gpl-3.0 |
CameronLonsdale/sec-tools | python2/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/py.py | 1323 | 1775 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
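# Minimal usage sketch (illustrative only, not part of the module): the trie
# is built from a dict with text keys; repeated prefix queries that share a
# common start reuse the cached bisection window.
#
#     t = Trie({u"abc": 1, u"abd": 2, u"xyz": 3})
#     t.keys(u"ab")                 -> set([u"abc", u"abd"])
#     t.has_keys_with_prefix(u"x")  -> True
#     t.has_keys_with_prefix(u"q")  -> False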
| mit |
unseenlaser/python-for-android | python3-alpha/python3-src/Tools/gdb/libpython.py | 88 | 56224 | #!/usr/bin/python
'''
From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb
to be extended with Python code e.g. for library-specific data visualizations,
such as for the C++ STL types. Documentation on this API can be seen at:
http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html
This python module deals with the case when the process being debugged (the
"inferior process" in gdb parlance) is itself python, or more specifically,
linked against libpython. In this situation, almost every item of data is a
(PyObject*), and having the debugger merely print their addresses is not very
enlightening.
This module embeds knowledge about the implementation details of libpython so
that we can emit useful visualizations e.g. a string, a list, a dict, a frame
giving file/line information and the state of local variables
In particular, given a gdb.Value corresponding to a PyObject* in the inferior
process, we can generate a "proxy value" within the gdb process. For example,
given a PyObject* in the inferior process that is in fact a PyListObject*
holding three PyObject* that turn out to be PyBytesObject* instances, we can
generate a proxy value within the gdb process that is a list of bytes
instances:
[b"foo", b"bar", b"baz"]
Doing so can be expensive for complicated graphs of objects, and could take
some time, so we also have a "write_repr" method that writes a representation
of the data to a file-like object. This allows us to stop the traversal by
having the file-like object raise an exception if it gets too much data.
With both "proxyval" and "write_repr" we keep track of the set of all addresses
visited so far in the traversal, to avoid infinite recursion due to cycles in
the graph of object references.
We try to defer gdb.lookup_type() invocations for python types until as late as
possible: for a dynamically linked python binary, when the process starts in
the debugger, the libpython.so hasn't been dynamically loaded yet, so none of
the type names are known to the debugger
The module also extends gdb with some python-specific commands.
'''
from __future__ import with_statement
import gdb
import locale
import sys
# Look up the gdb.Type for some standard types:
_type_char_ptr = gdb.lookup_type('char').pointer() # char*
_type_unsigned_char_ptr = gdb.lookup_type('unsigned char').pointer() # unsigned char*
_type_void_ptr = gdb.lookup_type('void').pointer() # void*
_type_size_t = gdb.lookup_type('size_t')
SIZEOF_VOID_P = _type_void_ptr.sizeof
Py_TPFLAGS_HEAPTYPE = (1L << 9)
Py_TPFLAGS_LONG_SUBCLASS = (1L << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1L << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1L << 26)
Py_TPFLAGS_BYTES_SUBCLASS = (1L << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1L << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1L << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1L << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1L << 31)
MAX_OUTPUT_LEN=1024
hexdigits = "0123456789abcdef"
ENCODING = locale.getpreferredencoding()
class NullPyObjectPtr(RuntimeError):
pass
def safety_limit(val):
# Given a integer value from the process being debugged, limit it to some
# safety threshold so that arbitrary breakage within said process doesn't
# break the gdb process too much (e.g. sizes of iterations, sizes of lists)
return min(val, 1000)
def safe_range(val):
# As per range, but don't trust the value too much: cap it to a safety
# threshold in case the data was corrupted
return xrange(safety_limit(val))
def write_unicode(file, text):
# Write a byte or unicode string to file. Unicode strings are encoded to
# ENCODING encoding with 'backslashreplace' error handler to avoid
# UnicodeEncodeError.
if isinstance(text, unicode):
text = text.encode(ENCODING, 'backslashreplace')
file.write(text)
def os_fsencode(filename):
if not isinstance(filename, unicode):
return filename
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
# mbcs doesn't support surrogateescape
return filename.encode(encoding)
encoded = []
for char in filename:
# surrogateescape error handler
if 0xDC80 <= ord(char) <= 0xDCFF:
byte = chr(ord(char) - 0xDC00)
else:
byte = char.encode(encoding)
encoded.append(byte)
return ''.join(encoded)
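# Example: with a utf-8 filesystem encoding, the surrogate u'\udc80' produced
# by the surrogateescape error handler maps back to the raw byte '\x80'
# instead of raising UnicodeEncodeError.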
class StringTruncated(RuntimeError):
pass
class TruncatedStringIO(object):
'''Similar to cStringIO, but can truncate the output by raising a
StringTruncated exception'''
def __init__(self, maxlen=None):
self._val = ''
self.maxlen = maxlen
def write(self, data):
if self.maxlen:
if len(data) + len(self._val) > self.maxlen:
# Truncation:
self._val += data[0:self.maxlen - len(self._val)]
raise StringTruncated()
self._val += data
def getvalue(self):
return self._val
class PyObjectPtr(object):
"""
Class wrapping a gdb.Value that's a either a (PyObject*) within the
inferior process, or some subclass pointer e.g. (PyBytesObject*)
There will be a subclass for every refined PyObject type that we care
about.
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
_typename = 'PyObject'
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
else:
self._gdbval = gdbval
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
        In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located insize the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
def pyop_field(self, name):
'''
Get a PyObjectPtr for the given PyObject* field within this PyObject,
coping with some python 2 versus python 3 differences.
'''
return PyObjectPtr.from_pyobject_ptr(self.field(name))
def write_field_repr(self, name, out, visited):
'''
Extract the PyObject* field named "name", and write its representation
to file-like object "out"
'''
field_obj = self.pyop_field(name)
field_obj.write_repr(out, visited)
def get_truncated_repr(self, maxlen):
'''
Get a repr-like string for the data, but truncate it at "maxlen" bytes
(ending the object graph traversal as soon as you do)
'''
out = TruncatedStringIO(maxlen)
try:
self.write_repr(out, set())
except StringTruncated:
# Truncation occurred:
return out.getvalue() + '...(truncated)'
# No truncation occurred:
return out.getvalue()
def type(self):
return PyTypeObjectPtr(self.field('ob_type'))
def is_null(self):
return 0 == long(self._gdbval)
def is_optimized_out(self):
'''
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
'''
return self._gdbval.is_optimized_out
def safe_tp_name(self):
try:
return self.type().field('tp_name').string()
except NullPyObjectPtr:
# NULL tp_name?
return 'unknown'
except RuntimeError:
# Can't even read the object at all?
return 'unknown'
def proxyval(self, visited):
'''
Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave
'''
class FakeRepr(object):
"""
Class representing a non-descript PyObject* value in the inferior
process for when we don't have a custom scraper, intended to have
a sane repr().
"""
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
# For the NULL pointer, we have no way of knowing a type, so
# special-case it as per
# http://bugs.python.org/issue8032#msg100882
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
long(self._gdbval))
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
@classmethod
def subclass_from_type(cls, t):
'''
Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
(PyTypeObject*), determine the corresponding subclass of PyObjectPtr
to use
Ideally, we would look up the symbols for the global types, but that
isn't working yet:
(gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
Traceback (most recent call last):
File "<string>", line 1, in <module>
NotImplementedError: Symbol type not yet supported in Python scripts.
Error while executing Python code.
For now, we use tp_flags, after doing some string comparisons on the
tp_name for some special-cases that don't seem to be visible through
flags
'''
try:
tp_name = t.field('tp_name').string()
tp_flags = int(t.field('tp_flags'))
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
return cls
#print 'tp_flags = 0x%08x' % tp_flags
#print 'tp_name = %r' % tp_name
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'instance': PyInstanceObjectPtr,
'NoneType': PyNoneStructPtr,
'frame': PyFrameObjectPtr,
'set' : PySetObjectPtr,
'frozenset' : PySetObjectPtr,
'builtin_function_or_method' : PyCFunctionObjectPtr,
}
if tp_name in name_map:
return name_map[tp_name]
if tp_flags & Py_TPFLAGS_HEAPTYPE:
return HeapTypeObjectPtr
if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
return PyLongObjectPtr
if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
return PyListObjectPtr
if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
return PyTupleObjectPtr
if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS:
return PyBytesObjectPtr
if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
return PyUnicodeObjectPtr
if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
return PyDictObjectPtr
if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
return PyBaseExceptionObjectPtr
#if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:
# return PyTypeObjectPtr
# Use the base class:
return cls
@classmethod
def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval)
@classmethod
def get_gdb_type(cls):
return gdb.lookup_type(cls._typename).pointer()
def as_address(self):
return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
_typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
'''
Placeholder proxy to use when protecting against infinite recursion due to
loops in the object graph.
Analogous to the values emitted by the users of Py_ReprEnter and Py_ReprLeave
'''
def __init__(self, rep):
self._rep = rep
def __repr__(self):
return self._rep
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
'''Shared code for use by old-style and new-style classes:
write a representation to file-like object "out"'''
out.write('<')
out.write(name)
# Write dictionary of instance attributes:
if isinstance(pyop_attrdict, PyDictObjectPtr):
out.write('(')
first = True
for pyop_arg, pyop_val in pyop_attrdict.iteritems():
if not first:
out.write(', ')
first = False
out.write(pyop_arg.proxyval(visited))
out.write('=')
pyop_val.write_repr(out, visited)
out.write(')')
out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
def __init__(self, cl_name, attrdict, address):
self.cl_name = cl_name
self.attrdict = attrdict
self.address = address
def __repr__(self):
if isinstance(self.attrdict, dict):
kwargs = ', '.join(["%s=%r" % (arg, val)
for arg, val in self.attrdict.iteritems()])
return '<%s(%s) at remote 0x%x>' % (self.cl_name,
kwargs, self.address)
else:
return '<%s at remote 0x%x>' % (self.cl_name,
self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
return ( ( typeobj.field('tp_basicsize') +
nitems * typeobj.field('tp_itemsize') +
(SIZEOF_VOID_P - 1)
) & ~(SIZEOF_VOID_P - 1)
).cast(_type_size_t)
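# Worked example (assumed values): on a 64-bit build (SIZEOF_VOID_P == 8),
# tp_basicsize == 16, tp_itemsize == 1 and nitems == 5 give 16 + 5 == 21
# bytes, which the alignment mask rounds up to the next multiple of 8: 24.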
class HeapTypeObjectPtr(PyObjectPtr):
_typename = 'PyObject'
def get_attr_dict(self):
'''
Get the PyDictObject ptr representing the attribute dictionary
(or None if there's a problem)
'''
try:
typeobj = self.type()
dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
if dictoffset != 0:
if dictoffset < 0:
type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
if tsize < 0:
tsize = -tsize
size = _PyObject_VAR_SIZE(typeobj, tsize)
dictoffset += size
assert dictoffset > 0
assert dictoffset % SIZEOF_VOID_P == 0
dictptr = self._gdbval.cast(_type_char_ptr) + dictoffset
PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
dictptr = dictptr.cast(PyObjectPtrPtr)
return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
except RuntimeError:
# Corrupt data somewhere; fail safe
pass
# Not found, or some kind of error:
return None
def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
pyop_attrdict = self.get_attr_dict()
_write_instance_repr(out, visited,
self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
def __init__(self, tp_name, args):
self.tp_name = tp_name
self.args = args
def __repr__(self):
return '%s%r' % (self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBaseExceptionObject* i.e. an exception
within the process being debugged.
"""
_typename = 'PyBaseExceptionObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
arg_proxy = self.pyop_field('args').proxyval(visited)
return ProxyException(self.safe_tp_name(),
arg_proxy)
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write(self.safe_tp_name())
self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
instance within the process being debugged.
"""
_typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
def __init__(self, ml_name):
self.ml_name = ml_name
def __repr__(self):
return "<built-in function %s>" % self.ml_name
class BuiltInMethodProxy(object):
def __init__(self, ml_name, pyop_m_self):
self.ml_name = ml_name
self.pyop_m_self = pyop_m_self
def __repr__(self):
return ('<built-in method %s of %s object at remote 0x%x>'
% (self.ml_name,
self.pyop_m_self.safe_tp_name(),
self.pyop_m_self.as_address())
)
class PyCFunctionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCFunctionObject*
(see Include/methodobject.h and Objects/methodobject.c)
"""
_typename = 'PyCFunctionObject'
def proxyval(self, visited):
m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*)
ml_name = m_ml['ml_name'].string()
pyop_m_self = self.pyop_field('m_self')
if pyop_m_self.is_null():
return BuiltInFunctionProxy(ml_name)
else:
return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
within the process being debugged.
"""
_typename = 'PyCodeObject'
def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
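    # Worked example (assumed values): with co_firstlineno == 5 and
    # co_lnotab == b'\x06\x01\x08\x02' the (addr, line) increments are
    # (6, 1) and (8, 2), so bytecode offsets 0-5 map to line 5, offsets
    # 6-13 map to line 6, and anything past that maps to line 8.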
class PyDictObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance
within the process being debugged.
"""
_typename = 'PyDictObject'
def iteritems(self):
'''
Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
        analogous to dict.iteritems()
'''
for i in safe_range(self.field('ma_mask') + 1):
ep = self.field('ma_table') + i
pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value)
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('{...}')
visited.add(self.as_address())
result = {}
for pyop_key, pyop_value in self.iteritems():
proxy_key = pyop_key.proxyval(visited)
proxy_value = pyop_value.proxyval(visited)
result[proxy_key] = proxy_value
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('{...}')
return
visited.add(self.as_address())
out.write('{')
first = True
for pyop_key, pyop_value in self.iteritems():
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write(': ')
pyop_value.write_repr(out, visited)
out.write('}')
class PyInstanceObjectPtr(PyObjectPtr):
_typename = 'PyInstanceObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
# Get name of class:
in_class = self.pyop_field('in_class')
cl_name = in_class.pyop_field('cl_name').proxyval(visited)
# Get dictionary of instance attributes:
in_dict = self.pyop_field('in_dict').proxyval(visited)
# Old-style class:
return InstanceProxy(cl_name, in_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
# Old-style class:
# Get name of class:
in_class = self.pyop_field('in_class')
cl_name = in_class.pyop_field('cl_name').proxyval(visited)
# Get dictionary of instance attributes:
pyop_in_dict = self.pyop_field('in_dict')
_write_instance_repr(out, visited,
cl_name, pyop_in_dict, self.as_address())
class PyListObjectPtr(PyObjectPtr):
_typename = 'PyListObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('[...]')
visited.add(self.as_address())
result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))]
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('[...]')
return
visited.add(self.as_address())
out.write('[')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
out.write(']')
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
def proxyval(self, visited):
'''
        Python's Include/longintrepr.h has this declaration:
struct _longobject {
PyObject_VAR_HEAD
digit ob_digit[1];
};
with this description:
The absolute value of a number is equal to
SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
Negative numbers are represented with ob_size < 0;
zero is represented by ob_size == 0.
where SHIFT can be either:
#define PyLong_SHIFT 30
#define PyLong_SHIFT 15
'''
ob_size = long(self.field('ob_size'))
if ob_size == 0:
return 0L
ob_digit = self.field('ob_digit')
if gdb.lookup_type('digit').sizeof == 2:
SHIFT = 15L
else:
SHIFT = 30L
digits = [long(ob_digit[i]) * 2**(SHIFT*i)
for i in safe_range(abs(ob_size))]
result = sum(digits)
if ob_size < 0:
result = -result
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 int literal, i.e. without the "L" suffix
proxy = self.proxyval(visited)
out.write("%s" % proxy)
class PyBoolObjectPtr(PyLongObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two
<bool> instances (Py_True/Py_False) within the process being debugged.
"""
def proxyval(self, visited):
if PyLongObjectPtr.proxyval(self, visited):
return True
else:
return False
class PyNoneStructPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyObject* pointing to the
singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
"""
_typename = 'PyObject'
def proxyval(self, visited):
return None
class PyFrameObjectPtr(PyObjectPtr):
_typename = 'PyFrameObject'
def __init__(self, gdbval, cast_to):
PyObjectPtr.__init__(self, gdbval, cast_to)
if not self.is_optimized_out():
self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
self.co_name = self.co.pyop_field('co_name')
self.co_filename = self.co.pyop_field('co_filename')
self.f_lineno = int_from_int(self.field('f_lineno'))
self.f_lasti = int_from_int(self.field('f_lasti'))
self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames'))
def iter_locals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the local variables of this frame
'''
if self.is_optimized_out():
return
f_localsplus = self.field('f_localsplus')
for i in safe_range(self.co_nlocals):
pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
if not pyop_value.is_null():
pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i])
yield (pyop_name, pyop_value)
def iter_globals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the global variables of this frame
'''
if self.is_optimized_out():
return
pyop_globals = self.pyop_field('f_globals')
return pyop_globals.iteritems()
def iter_builtins(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the builtin variables
'''
if self.is_optimized_out():
return
pyop_builtins = self.pyop_field('f_builtins')
return pyop_builtins.iteritems()
def get_var_by_name(self, name):
'''
Look for the named local variable, returning a (PyObjectPtr, scope) pair
where scope is a string 'local', 'global', 'builtin'
If not found, return (None, None)
'''
for pyop_name, pyop_value in self.iter_locals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'local'
for pyop_name, pyop_value in self.iter_globals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'global'
for pyop_name, pyop_value in self.iter_builtins():
if name == pyop_name.proxyval(set()):
return pyop_value, 'builtin'
return None, None
def filename(self):
'''Get the path of the current Python source file, as a string'''
if self.is_optimized_out():
return '(frame information optimized out)'
return self.co_filename.proxyval(set())
def current_line_num(self):
'''Get current line number as an integer (1-based)
Translated from PyFrame_GetLineNumber and PyCode_Addr2Line
See Objects/lnotab_notes.txt
'''
if self.is_optimized_out():
return None
f_trace = self.field('f_trace')
if long(f_trace) != 0:
# we have a non-NULL f_trace:
return self.f_lineno
else:
#try:
return self.co.addr2line(self.f_lasti)
#except ValueError:
# return self.f_lineno
def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
filename = self.filename()
try:
f = open(os_fsencode(filename), 'r')
except IOError:
return None
with f:
all_lines = f.readlines()
# Convert from 1-based current_line_num to 0-based list offset:
return all_lines[self.current_line_num()-1]
def write_repr(self, out, visited):
if self.is_optimized_out():
out.write('(frame information optimized out)')
return
out.write('Frame 0x%x, for file %s, line %i, in %s ('
% (self.as_address(),
self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
first = True
for pyop_name, pyop_value in self.iter_locals():
if not first:
out.write(', ')
first = False
out.write(pyop_name.proxyval(visited))
out.write('=')
pyop_value.write_repr(out, visited)
out.write(')')
def print_traceback(self):
        if self.is_optimized_out():
            sys.stdout.write(' (frame information optimized out)\n')
            return
visited = set()
sys.stdout.write(' File "%s", line %i, in %s\n'
% (self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
class PySetObjectPtr(PyObjectPtr):
_typename = 'PySetObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
visited.add(self.as_address())
members = []
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
key_proxy = PyObjectPtr.from_pyobject_ptr(key).proxyval(visited)
if key_proxy != '<dummy key>':
members.append(key_proxy)
if self.safe_tp_name() == 'frozenset':
return frozenset(members)
else:
return set(members)
def write_repr(self, out, visited):
# Emulate Python 3's set_repr
tp_name = self.safe_tp_name()
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
# Python 3's set_repr special-cases the empty set:
if not self.field('used'):
out.write(tp_name)
out.write('()')
return
# Python 3 uses {} for set literals:
if tp_name != 'set':
out.write(tp_name)
out.write('(')
out.write('{')
first = True
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
pyop_key = PyObjectPtr.from_pyobject_ptr(key)
key_proxy = pyop_key.proxyval(visited) # FIXME!
if key_proxy != '<dummy key>':
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write('}')
if tp_name != 'set':
out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
_typename = 'PyBytesObject'
def __str__(self):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
char_ptr = field_ob_sval.address.cast(_type_unsigned_char_ptr)
return ''.join([chr(char_ptr[i]) for i in safe_range(field_ob_size)])
def proxyval(self, visited):
return str(self)
def write_repr(self, out, visited):
# Write this out as a Python 3 bytes literal, i.e. with a "b" prefix
# Get a PyStringObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr
# to Python 2 code:
quote = "'"
if "'" in proxy and not '"' in proxy:
quote = '"'
out.write('b')
out.write(quote)
for byte in proxy:
if byte == quote or byte == '\\':
out.write('\\')
out.write(byte)
elif byte == '\t':
out.write('\\t')
elif byte == '\n':
out.write('\\n')
elif byte == '\r':
out.write('\\r')
elif byte < ' ' or ord(byte) >= 0x7f:
out.write('\\x')
out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
out.write(hexdigits[ord(byte) & 0xf])
else:
out.write(byte)
out.write(quote)
class PyTupleObjectPtr(PyObjectPtr):
_typename = 'PyTupleObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
result = tuple([PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))])
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write('(')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
if self.field('ob_size') == 1:
out.write(',)')
else:
out.write(')')
class PyTypeObjectPtr(PyObjectPtr):
_typename = 'PyTypeObject'
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
if sys.maxunicode >= 0x10000:
_unichr = unichr
else:
# Needed for proper surrogate support if sizeof(Py_UNICODE) is 2 in gdb
def _unichr(x):
if x < 0x10000:
return unichr(x)
x -= 0x10000
ch1 = 0xD800 | (x >> 10)
ch2 = 0xDC00 | (x & 0x3FF)
return unichr(ch1) + unichr(ch2)
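# Worked example: on a narrow build, _unichr(0x10400) yields the surrogate
# pair u'\ud801\udc00' (0xD800 | (0x0400 >> 10), 0xDC00 | (0x0400 & 0x3FF)).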
class PyUnicodeObjectPtr(PyObjectPtr):
_typename = 'PyUnicodeObject'
def char_width(self):
_type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
return _type_Py_UNICODE.sizeof
def proxyval(self, visited):
# From unicodeobject.h:
# Py_ssize_t length; /* Length of raw Unicode data in buffer */
# Py_UNICODE *str; /* Raw Unicode buffer */
field_length = long(self.field('length'))
field_str = self.field('str')
# Gather a list of ints from the Py_UNICODE array; these are either
# UCS-2 or UCS-4 code points:
if self.char_width() > 2:
Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
else:
# A more elaborate routine if sizeof(Py_UNICODE) is 2 in the
# inferior process: we must join surrogate pairs.
Py_UNICODEs = []
i = 0
limit = safety_limit(field_length)
while i < limit:
ucs = int(field_str[i])
i += 1
if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
Py_UNICODEs.append(ucs)
continue
# This could be a surrogate pair.
ucs2 = int(field_str[i])
                if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
                    # Not a valid low surrogate: keep the lone high
                    # surrogate instead of silently dropping it.
                    Py_UNICODEs.append(ucs)
                    continue
code = (ucs & 0x03FF) << 10
code |= ucs2 & 0x03FF
code += 0x00010000
Py_UNICODEs.append(code)
i += 1
# Convert the int code points to unicode characters, and generate a
# local unicode instance.
# This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb).
result = u''.join([_unichr(ucs) for ucs in Py_UNICODEs])
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 str literal, i.e. without a "u" prefix
# Get a PyUnicodeObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Object/unicodeobject.c:unicode_repr
# to Python 2:
if "'" in proxy and '"' not in proxy:
quote = '"'
else:
quote = "'"
out.write(quote)
i = 0
while i < len(proxy):
ch = proxy[i]
i += 1
# Escape quotes and backslashes
if ch == quote or ch == '\\':
out.write('\\')
out.write(ch)
# Map special whitespace to '\t', \n', '\r'
elif ch == '\t':
out.write('\\t')
elif ch == '\n':
out.write('\\n')
elif ch == '\r':
out.write('\\r')
# Map non-printable US ASCII to '\xhh' */
            elif ch < ' ' or ch == u'\x7f':
out.write('\\x')
out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
out.write(hexdigits[ord(ch) & 0x000F])
# Copy ASCII characters as-is
elif ord(ch) < 0x7F:
out.write(ch)
# Non-ASCII characters
else:
ucs = ch
ch2 = None
if sys.maxunicode < 0x10000:
# If sizeof(Py_UNICODE) is 2 here (in gdb), join
# surrogate pairs before calling _unichr_is_printable.
if (i < len(proxy)
and 0xD800 <= ord(ch) < 0xDC00 \
and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
ch2 = proxy[i]
ucs = ch + ch2
i += 1
                    # Unfortunately, Python 2's unicode type doesn't seem
# to expose the "isprintable" method
printable = _unichr_is_printable(ucs)
if printable:
try:
ucs.encode(ENCODING)
except UnicodeEncodeError:
printable = False
# Map Unicode whitespace and control characters
# (categories Z* and C* except ASCII space)
if not printable:
if ch2 is not None:
# Match Python 3's representation of non-printable
# wide characters.
code = (ord(ch) & 0x03FF) << 10
code |= ord(ch2) & 0x03FF
code += 0x00010000
else:
code = ord(ucs)
# Map 8-bit characters to '\\xhh'
if code <= 0xff:
out.write('\\x')
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
# Map 21-bit characters to '\U00xxxxxx'
elif code >= 0x10000:
out.write('\\U')
out.write(hexdigits[(code >> 28) & 0x0000000F])
out.write(hexdigits[(code >> 24) & 0x0000000F])
out.write(hexdigits[(code >> 20) & 0x0000000F])
out.write(hexdigits[(code >> 16) & 0x0000000F])
out.write(hexdigits[(code >> 12) & 0x0000000F])
out.write(hexdigits[(code >> 8) & 0x0000000F])
out.write(hexdigits[(code >> 4) & 0x0000000F])
out.write(hexdigits[code & 0x0000000F])
# Map 16-bit characters to '\uxxxx'
else:
out.write('\\u')
out.write(hexdigits[(code >> 12) & 0x000F])
out.write(hexdigits[(code >> 8) & 0x000F])
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
else:
# Copy characters as-is
out.write(ch)
if ch2 is not None:
out.write(ch2)
out.write(quote)
def int_from_int(gdbval):
return int(str(gdbval))
def stringify(val):
# TODO: repr() puts everything on one line; pformat can be nicer, but
# can lead to v.long results; this function isolates the choice
if True:
return repr(val)
else:
from pprint import pformat
return pformat(val)
class PyObjectPtrPrinter:
"Prints a (PyObject*)"
def __init__ (self, gdbval):
self.gdbval = gdbval
def to_string (self):
pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
if True:
return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
else:
# Generate full proxy value then stringify it.
# Doing so could be expensive
proxyval = pyop.proxyval(set())
return stringify(proxyval)
def pretty_printer_lookup(gdbval):
type = gdbval.type.unqualified()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
t = str(type)
if t in ("PyObject", "PyFrameObject", "PyUnicodeObject"):
return PyObjectPtrPrinter(gdbval)
"""
During development, I've been manually invoking the code in this way:
(gdb) python
import sys
sys.path.append('/home/david/coding/python-gdb')
import libpython
end
then reloading it after each edit like this:
(gdb) python reload(libpython)
The following code should ensure that the prettyprinter is registered
if the code is autoloaded by gdb when visiting libpython.so, provided
that this python file is installed to the same path as the library (or its
.debug file) plus a "-gdb.py" suffix, e.g:
/usr/lib/libpython2.6.so.1.0-gdb.py
/usr/lib/debug/usr/lib/libpython2.6.so.1.0.debug-gdb.py
"""
def register (obj):
    if obj is None:
obj = gdb
# Wire up the pretty-printer
obj.pretty_printers.append(pretty_printer_lookup)
register (gdb.current_objfile ())
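# Hedged usage sketch (not in the original; assumes a live gdb session):
# the autoload path described in the docstring above is optional -- the
# same registration can be done by hand, since register(None) falls back
# to the global gdb module:
#
#   (gdb) python
#   >import libpython
#   >libpython.register(None)
#   >end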
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
'''
Wrapper for gdb.Frame, adding various methods
'''
def __init__(self, gdbframe):
self._gdbframe = gdbframe
def older(self):
older = self._gdbframe.older()
if older:
return Frame(older)
else:
return None
def newer(self):
newer = self._gdbframe.newer()
if newer:
return Frame(newer)
else:
return None
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index
def is_evalframeex(self):
'''Is this a PyEval_EvalFrameEx frame?'''
if self._gdbframe.name() == 'PyEval_EvalFrameEx':
'''
I believe we also need to filter on the inline
struct frame_id.inline_depth, only regarding frames with
an inline depth of 0 as actually being this function
So we reject those with type gdb.INLINE_FRAME
'''
if self._gdbframe.type() == gdb.NORMAL_FRAME:
# We have a PyEval_EvalFrameEx frame:
return True
return False
def get_pyop(self):
try:
f = self._gdbframe.read_var('f')
return PyFrameObjectPtr.from_pyobject_ptr(f)
except ValueError:
return None
@classmethod
def get_selected_frame(cls):
_gdbframe = gdb.selected_frame()
if _gdbframe:
return Frame(_gdbframe)
return None
@classmethod
def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python code in the selected frame,
or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframeex():
return frame
frame = frame.older()
# Not found:
return None
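# Illustrative sketch (not in the original): the two classmethods above
# are the usual entry point for the py-* commands defined below, e.g.
# walking every python-level frame from newest to oldest:
#
#   frame = Frame.get_selected_python_frame()
#   while frame:
#       if frame.is_evalframeex():
#           frame.print_summary()
#       frame = frame.older()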
def print_summary(self):
if self.is_evalframeex():
pyop = self.get_pyop()
if pyop:
line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
line = pyop.current_line()
if line is not None:
sys.stdout.write(line)
else:
sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
else:
sys.stdout.write('#%i\n' % self.get_index())
def print_traceback(self):
if self.is_evalframeex():
pyop = self.get_pyop()
if pyop:
pyop.print_traceback()
line = pyop.current_line()
if line is not None:
sys.stdout.write(' %s\n' % line.strip())
else:
sys.stdout.write(' (unable to read python frame information)\n')
else:
sys.stdout.write(' (not a python frame)\n')
class PyList(gdb.Command):
'''List the current Python source code, if any
Use
py-list START
to list at a different line number within the python source.
Use
py-list START, END
to list a specific range of lines within the python source.
'''
def __init__(self):
gdb.Command.__init__ (self,
"py-list",
gdb.COMMAND_FILES,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
import re
start = None
end = None
m = re.match(r'\s*(\d+)\s*', args)
if m:
start = int(m.group(0))
end = start + 10
m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
if m:
start, end = map(int, m.groups())
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop = frame.get_pyop()
if not pyop:
print 'Unable to read information on python frame'
return
filename = pyop.filename()
lineno = pyop.current_line_num()
if start is None:
start = lineno - 5
end = lineno + 5
if start<1:
start = 1
try:
f = open(os_fsencode(filename), 'r')
except IOError as err:
sys.stdout.write('Unable to open %s: %s\n'
% (filename, err))
return
with f:
all_lines = f.readlines()
# start and end are 1-based, all_lines is 0-based;
# so [start-1:end] as a python slice gives us [start, end] as a
# closed interval
for i, line in enumerate(all_lines[start-1:end]):
linestr = str(i+start)
# Highlight current line:
if i + start == lineno:
linestr = '>' + linestr
sys.stdout.write('%4s %s' % (linestr, line))
# ...and register the command:
PyList()
def move_in_stack(move_up):
'''Move up or down the stack (for the py-up/py-down command)'''
frame = Frame.get_selected_python_frame()
while frame:
if move_up:
iter_frame = frame.older()
else:
iter_frame = frame.newer()
if not iter_frame:
break
if iter_frame.is_evalframeex():
# Result:
if iter_frame.select():
iter_frame.print_summary()
return
frame = iter_frame
if move_up:
print 'Unable to find an older python frame'
else:
print 'Unable to find a newer python frame'
class PyUp(gdb.Command):
'Select and print the python stack frame that called this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-up",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=True)
class PyDown(gdb.Command):
'Select and print the python stack frame called by this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-down",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=False)
# Not all builds of gdb have gdb.Frame.select
if hasattr(gdb.Frame, 'select'):
PyUp()
PyDown()
class PyBacktraceFull(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt-full",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
frame = Frame.get_selected_python_frame()
while frame:
if frame.is_evalframeex():
frame.print_summary()
frame = frame.older()
PyBacktraceFull()
class PyBacktrace(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
sys.stdout.write('Traceback (most recent call first):\n')
frame = Frame.get_selected_python_frame()
while frame:
if frame.is_evalframeex():
frame.print_traceback()
frame = frame.older()
PyBacktrace()
class PyPrint(gdb.Command):
'Look up the given python variable name, and print it'
def __init__(self):
gdb.Command.__init__ (self,
"py-print",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print 'Unable to read information on python frame'
return
pyop_var, scope = pyop_frame.get_var_by_name(name)
if pyop_var:
print ('%s %r = %s'
% (scope,
name,
pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))
else:
print '%r not found' % name
PyPrint()
class PyLocals(gdb.Command):
'Look up the given python variable name, and print it'
def __init__(self):
gdb.Command.__init__ (self,
"py-locals",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print 'Unable to read information on python frame'
return
for pyop_name, pyop_value in pyop_frame.iter_locals():
print ('%s = %s'
% (pyop_name.proxyval(set()),
pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)))
PyLocals()
| apache-2.0 |
CasparLi/calibre | src/calibre/ebooks/textile/functions.py | 14 | 39331 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
PyTextile
A Humane Web Text Generator
"""
# Last upstream version basis
# __version__ = '2.1.4'
#__date__ = '2009/12/04'
__copyright__ = """
Copyright (c) 2011, Leigh Parry <[email protected]>
Copyright (c) 2011, John Schember <[email protected]>
Copyright (c) 2009, Jason Samsa, http://jsamsa.com/
Copyright (c) 2004, Roberto A. F. De Almeida, http://dealmeida.net/
Copyright (c) 2003, Mark Pilgrim, http://diveintomark.org/
Original PHP Version:
Copyright (c) 2003-2004, Dean Allen <[email protected]>
All rights reserved.
Thanks to Carlo Zottmann <[email protected]> for refactoring
Textile's procedural code into a class framework
Additions and fixes Copyright (c) 2006 Alex Shiels http://thresholdstate.com/
"""
__license__ = """
L I C E N S E
=============
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name Textile nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import re
import uuid
from urlparse import urlparse
from calibre.utils.smartypants import smartyPants
def _normalize_newlines(string):
out = re.sub(r'\r\n', '\n', string)
out = re.sub(r'\n{3,}', '\n\n', out)
out = re.sub(r'\n\s*\n', '\n\n', out)
out = re.sub(r'"$', '" ', out)
return out
def getimagesize(url):
"""
Attempts to determine an image's width and height, and returns a string
suitable for use in an <img> tag, or None in case of failure.
Requires that PIL is installed.
>>> getimagesize("http://www.google.com/intl/en_ALL/images/logo.gif")
... #doctest: +ELLIPSIS, +SKIP
'width="..." height="..."'
"""
try:
from PIL import ImageFile
except ImportError:
try:
import ImageFile
except ImportError:
return None
try:
import urllib2
except ImportError:
return None
try:
p = ImageFile.Parser()
f = urllib2.urlopen(url)
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
if p.image:
return 'width="%i" height="%i"' % p.image.size
except (IOError, ValueError):
return None
class Textile(object):
hlgn = r'(?:\<(?!>)|(?<!<)\>|\<\>|\=|[()]+(?! ))'
vlgn = r'[\-^~]'
clas = r'(?:\([^)]+\))'
lnge = r'(?:\[[^\]]+\])'
styl = r'(?:\{[^}]+\})'
cspn = r'(?:\\\d+)'
rspn = r'(?:\/\d+)'
a = r'(?:%s|%s)*' % (hlgn, vlgn)
s = r'(?:%s|%s)*' % (cspn, rspn)
c = r'(?:%s)*' % '|'.join([clas, styl, lnge, hlgn])
pnct = r'[-!"#$%&()*+,/:;<=>?@\'\[\\\]\.^_`{|}~]'
# urlch = r'[\w"$\-_.+!*\'(),";/?:@=&%#{}|\\^~\[\]`]'
urlch = '[\w"$\-_.+*\'(),";\/?:@=&%#{}|\\^~\[\]`]'
url_schemes = ('http', 'https', 'ftp', 'mailto')
btag = ('bq', 'bc', 'notextile', 'pre', 'h[1-6]', 'fn\d+', 'p')
btag_lite = ('bq', 'bc', 'p')
macro_defaults = [
(re.compile(r'{(c\||\|c)}'), r'¢'), # cent
(re.compile(r'{(L-|-L)}'), r'£'), # pound
(re.compile(r'{(Y=|=Y)}'), r'¥'), # yen
(re.compile(r'{\(c\)}'), r'©'), # copyright
(re.compile(r'{\(r\)}'), r'®'), # registered
(re.compile(r'{(\+_|_\+)}'), r'±'), # plus-minus
(re.compile(r'{1/4}'), r'¼'), # quarter
(re.compile(r'{1/2}'), r'½'), # half
(re.compile(r'{3/4}'), r'¾'), # three-quarter
(re.compile(r'{(A`|`A)}'), r'À'), # A-acute
(re.compile(r'{(A\'|\'A)}'), r'Á'), # A-grave
(re.compile(r'{(A\^|\^A)}'), r'Â'), # A-circumflex
(re.compile(r'{(A~|~A)}'), r'Ã'), # A-tilde
(re.compile(r'{(A\"|\"A)}'), r'Ä'), # A-diaeresis
(re.compile(r'{(Ao|oA)}'), r'Å'), # A-ring
(re.compile(r'{(AE)}'), r'Æ'), # AE
(re.compile(r'{(C,|,C)}'), r'Ç'), # C-cedilla
(re.compile(r'{(E`|`E)}'), r'È'), # E-acute
(re.compile(r'{(E\'|\'E)}'), r'É'), # E-grave
(re.compile(r'{(E\^|\^E)}'), r'Ê'), # E-circumflex
(re.compile(r'{(E\"|\"E)}'), r'Ë'), # E-diaeresis
(re.compile(r'{(I`|`I)}'), r'Ì'), # I-acute
(re.compile(r'{(I\'|\'I)}'), r'Í'), # I-grave
(re.compile(r'{(I\^|\^I)}'), r'Î'), # I-circumflex
(re.compile(r'{(I\"|\"I)}'), r'Ï'), # I-diaeresis
(re.compile(r'{(D-|-D)}'), r'Ð'), # ETH
(re.compile(r'{(N~|~N)}'), r'Ñ'), # N-tilde
(re.compile(r'{(O`|`O)}'), r'Ò'), # O-acute
(re.compile(r'{(O\'|\'O)}'), r'Ó'), # O-grave
(re.compile(r'{(O\^|\^O)}'), r'Ô'), # O-circumflex
(re.compile(r'{(O~|~O)}'), r'Õ'), # O-tilde
(re.compile(r'{(O\"|\"O)}'), r'Ö'), # O-diaeresis
(re.compile(r'{x}'), r'×'), # dimension
(re.compile(r'{(O\/|\/O)}'), r'Ø'), # O-slash
(re.compile(r'{(U`|`U)}'), r'Ù'), # U-acute
(re.compile(r'{(U\'|\'U)}'), r'Ú'), # U-grave
(re.compile(r'{(U\^|\^U)}'), r'Û'), # U-circumflex
(re.compile(r'{(U\"|\"U)}'), r'Ü'), # U-diaeresis
(re.compile(r'{(Y\'|\'Y)}'), r'Ý'), # Y-grave
(re.compile(r'{sz}'), r'ß'), # sharp-s
(re.compile(r'{(a`|`a)}'), r'à'), # a-grave
(re.compile(r'{(a\'|\'a)}'), r'á'), # a-acute
(re.compile(r'{(a\^|\^a)}'), r'â'), # a-circumflex
(re.compile(r'{(a~|~a)}'), r'ã'), # a-tilde
(re.compile(r'{(a\"|\"a)}'), r'ä'), # a-diaeresis
(re.compile(r'{(ao|oa)}'), r'å'), # a-ring
(re.compile(r'{ae}'), r'æ'), # ae
(re.compile(r'{(c,|,c)}'), r'ç'), # c-cedilla
(re.compile(r'{(e`|`e)}'), r'è'), # e-grave
(re.compile(r'{(e\'|\'e)}'), r'é'), # e-acute
(re.compile(r'{(e\^|\^e)}'), r'ê'), # e-circumflex
(re.compile(r'{(e\"|\"e)}'), r'ë'), # e-diaeresis
(re.compile(r'{(i`|`i)}'), r'ì'), # i-grave
(re.compile(r'{(i\'|\'i)}'), r'í'), # i-acute
(re.compile(r'{(i\^|\^i)}'), r'î'), # i-circumflex
(re.compile(r'{(i\"|\"i)}'), r'ï'), # i-diaeresis
(re.compile(r'{(d-|-d)}'), r'ð'), # eth
(re.compile(r'{(n~|~n)}'), r'ñ'), # n-tilde
(re.compile(r'{(o`|`o)}'), r'ò'), # o-grave
(re.compile(r'{(o\'|\'o)}'), r'ó'), # o-acute
(re.compile(r'{(o\^|\^o)}'), r'ô'), # o-circumflex
(re.compile(r'{(o~|~o)}'), r'õ'), # o-tilde
(re.compile(r'{(o\"|\"o)}'), r'ö'), # o-diaeresis
(re.compile(r'{(o\/|\/o)}'), r'ø'), # o-stroke
(re.compile(r'{(u`|`u)}'), r'ù'), # u-grave
(re.compile(r'{(u\'|\'u)}'), r'ú'), # u-acute
(re.compile(r'{(u\^|\^u)}'), r'û'), # u-circumflex
(re.compile(r'{(u\"|\"u)}'), r'ü'), # u-diaeresis
(re.compile(r'{(y\'|\'y)}'), r'ý'), # y-acute
(re.compile(r'{(y\"|\"y)}'), r'ÿ'), # y-diaeresis
(re.compile(r'{(C\ˇ|\ˇC)}'), r'Č'), # C-caron
(re.compile(r'{(c\ˇ|\ˇc)}'), r'č'), # c-caron
(re.compile(r'{(D\ˇ|\ˇD)}'), r'Ď'), # D-caron
(re.compile(r'{(d\ˇ|\ˇd)}'), r'ď'), # d-caron
(re.compile(r'{(E\ˇ|\ˇE)}'), r'Ě'), # E-caron
(re.compile(r'{(e\ˇ|\ˇe)}'), r'ě'), # e-caron
(re.compile(r'{(L\'|\'L)}'), r'Ĺ'), # L-acute
(re.compile(r'{(l\'|\'l)}'), r'ĺ'), # l-acute
(re.compile(r'{(L\ˇ|\ˇL)}'), r'Ľ'), # L-caron
(re.compile(r'{(l\ˇ|\ˇl)}'), r'ľ'), # l-caron
(re.compile(r'{(N\ˇ|\ˇN)}'), r'Ň'), # N-caron
(re.compile(r'{(n\ˇ|\ˇn)}'), r'ň'), # n-caron
(re.compile(r'{OE}'), r'Œ'), # OE
(re.compile(r'{oe}'), r'œ'), # oe
(re.compile(r'{(R\'|\'R)}'), r'Ŕ'), # R-acute
(re.compile(r'{(r\'|\'r)}'), r'ŕ'), # r-acute
(re.compile(r'{(R\ˇ|\ˇR)}'), r'Ř'), # R-caron
(re.compile(r'{(r\ˇ|\ˇr)}'), r'ř'), # r-caron
(re.compile(r'{(S\^|\^S)}'), r'Ŝ'), # S-circumflex
(re.compile(r'{(s\^|\^s)}'), r'ŝ'), # s-circumflex
(re.compile(r'{(S\ˇ|\ˇS)}'), r'Š'), # S-caron
(re.compile(r'{(s\ˇ|\ˇs)}'), r'š'), # s-caron
(re.compile(r'{(T\ˇ|\ˇT)}'), r'Ť'), # T-caron
(re.compile(r'{(t\ˇ|\ˇt)}'), r'ť'), # t-caron
(re.compile(r'{(U\°|\°U)}'), r'Ů'), # U-ring
(re.compile(r'{(u\°|\°u)}'), r'ů'), # u-ring
(re.compile(r'{(Z\ˇ|\ˇZ)}'), r'Ž'), # Z-caron
(re.compile(r'{(z\ˇ|\ˇz)}'), r'ž'), # z-caron
(re.compile(r'{\*}'), r'•'), # bullet
(re.compile(r'{Fr}'), r'₣'), # Franc
(re.compile(r'{(L=|=L)}'), r'₤'), # Lira
(re.compile(r'{Rs}'), r'₨'), # Rupee
(re.compile(r'{(C=|=C)}'), r'€'), # euro
(re.compile(r'{tm}'), r'™'), # trademark
(re.compile(r'{spades?}'), r'♠'), # spade
(re.compile(r'{clubs?}'), r'♣'), # club
(re.compile(r'{hearts?}'), r'♥'), # heart
(re.compile(r'{diam(onds?|s)}'), r'♦'), # diamond
(re.compile(r'{"}'), r'"'), # double-quote
(re.compile(r"{'}"), r'''), # single-quote
(re.compile(r"{(’|'/|/')}"), r'’'), # closing-single-quote - apostrophe
(re.compile(r"{(‘|\\'|'\\)}"), r'‘'), # opening-single-quote
(re.compile(r'{(”|"/|/")}'), r'”'), # closing-double-quote
(re.compile(r'{(“|\\"|"\\)}'), r'“'), # opening-double-quote
]
glyph_defaults = [
(re.compile(r'(\d+\'?\"?)( ?)x( ?)(?=\d+)'), r'\1\2×\3'), # dimension sign
(re.compile(r'(\d+)\'(\s)', re.I), r'\1′\2'), # prime
(re.compile(r'(\d+)\"(\s)', re.I), r'\1″\2'), # prime-double
(re.compile(r'\b([A-Z][A-Z0-9]{2,})\b(?:[(]([^)]*)[)])'), r'<acronym title="\2">\1</acronym>'), # 3+ uppercase acronym
(re.compile(r'\b([A-Z][A-Z\'\-]+[A-Z])(?=[\s.,\)>])'), r'<span class="caps">\1</span>'), # 3+ uppercase
(re.compile(r'\b(\s{0,1})?\.{3}'), r'\1…'), # ellipsis
(re.compile(r'^[\*_-]{3,}$', re.M), r'<hr />'), # <hr> scene-break
(re.compile(r'(^|[^-])--([^-]|$)'), r'\1—\2'), # em dash
(re.compile(r'\s-(?:\s|$)'), r' – '), # en dash
(re.compile(r'\b( ?)[([]TM[])]', re.I), r'\1™'), # trademark
(re.compile(r'\b( ?)[([]R[])]', re.I), r'\1®'), # registered
(re.compile(r'\b( ?)[([]C[])]', re.I), r'\1©'), # copyright
]
def __init__(self, restricted=False, lite=False, noimage=False):
"""docstring for __init__"""
self.restricted = restricted
self.lite = lite
self.noimage = noimage
self.get_sizes = False
self.fn = {}
self.urlrefs = {}
self.shelf = {}
self.rel = ''
self.html_type = 'xhtml'
def textile(self, text, rel=None, head_offset=0, html_type='xhtml'):
"""
>>> import textile
>>> textile.textile('some textile')
u'\\t<p>some textile</p>'
"""
self.html_type = html_type
# text = unicode(text)
text = _normalize_newlines(text)
if self.restricted:
text = self.encode_html(text, quotes=False)
if rel:
self.rel = ' rel="%s"' % rel
text = self.getRefs(text)
text = self.block(text, int(head_offset))
text = self.retrieve(text)
text = smartyPants(text, 'q')
return text
def pba(self, input, element=None):
"""
Parse block attributes.
>>> t = Textile()
>>> t.pba(r'\3')
''
>>> t.pba(r'\\3', element='td')
' colspan="3"'
>>> t.pba(r'/4', element='td')
' rowspan="4"'
>>> t.pba(r'\\3/4', element='td')
' colspan="3" rowspan="4"'
>>> t.vAlign('^')
'top'
>>> t.pba('^', element='td')
' style="vertical-align:top;"'
>>> t.pba('{line-height:18px}')
' style="line-height:18px;"'
>>> t.pba('(foo-bar)')
' class="foo-bar"'
>>> t.pba('(#myid)')
' id="myid"'
>>> t.pba('(foo-bar#myid)')
' class="foo-bar" id="myid"'
>>> t.pba('((((')
' style="padding-left:4em;"'
>>> t.pba(')))')
' style="padding-right:3em;"'
>>> t.pba('[fr]')
' lang="fr"'
"""
style = []
aclass = ''
lang = ''
colspan = ''
rowspan = ''
id = ''
if not input:
return ''
matched = input
if element == 'td':
m = re.search(r'\\(\d+)', matched)
if m:
colspan = m.group(1)
m = re.search(r'/(\d+)', matched)
if m:
rowspan = m.group(1)
if element == 'td' or element == 'tr':
m = re.search(r'(%s)' % self.vlgn, matched)
if m:
style.append("vertical-align:%s;" % self.vAlign(m.group(1)))
m = re.search(r'\{([^}]*)\}', matched)
if m:
style.append(m.group(1).rstrip(';') + ';')
matched = matched.replace(m.group(0), '')
m = re.search(r'\[([^\]]+)\]', matched, re.U)
if m:
lang = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'\(([^()]+)\)', matched, re.U)
if m:
aclass = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'([(]+)', matched)
if m:
style.append("padding-left:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'([)]+)', matched)
if m:
style.append("padding-right:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'(%s)' % self.hlgn, matched)
if m:
style.append("text-align:%s;" % self.hAlign(m.group(1)))
m = re.search(r'^(.*)#(.*)$', aclass)
if m:
id = m.group(2)
aclass = m.group(1)
if self.restricted:
if lang:
return ' lang="%s"' % lang
else:
return ''
result = []
if style:
result.append(' style="%s"' % "".join(style))
if aclass:
result.append(' class="%s"' % aclass)
if lang:
result.append(' lang="%s"' % lang)
if id:
result.append(' id="%s"' % id)
if colspan:
result.append(' colspan="%s"' % colspan)
if rowspan:
result.append(' rowspan="%s"' % rowspan)
return ''.join(result)
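# Illustrative sketch (not in the original docstring): several attribute
# groups can be combined in one modifier string; per the result-building
# order above, style comes first, then class, lang and id:
#
#   >>> t.pba('(foo-bar#myid){color:red}[en]<', element='p')
#   ' style="color:red;text-align:left;" class="foo-bar" lang="en" id="myid"'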
def hasRawText(self, text):
"""
checks whether the text has text not already enclosed by a block tag
>>> t = Textile()
>>> t.hasRawText('<p>foo bar biz baz</p>')
False
>>> t.hasRawText(' why yes, yes it does')
True
"""
r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>', re.S).sub('', text.strip()).strip()
r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)
return '' != r
def table(self, text):
r"""
>>> t = Textile()
>>> t.table('|one|two|three|\n|a|b|c|')
'\t<table>\n\t\t<tr>\n\t\t\t<td>one</td>\n\t\t\t<td>two</td>\n\t\t\t<td>three</td>\n\t\t</tr>\n\t\t<tr>\n\t\t\t<td>a</td>\n\t\t\t<td>b</td>\n\t\t\t<td>c</td>\n\t\t</tr>\n\t</table>\n\n'
"""
text = text + "\n\n"
pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n' % {'s':self.s, 'a':self.a, 'c':self.c}, re.S|re.M|re.U)
return pattern.sub(self.fTable, text)
def fTable(self, match):
tatts = self.pba(match.group(1), 'table')
rows = []
for row in [ x for x in match.group(2).split('\n') if x]:
rmtch = re.search(r'^(%s%s\. )(.*)' % (self.a, self.c), row.lstrip())
if rmtch:
ratts = self.pba(rmtch.group(1), 'tr')
row = rmtch.group(2)
else:
ratts = ''
cells = []
for cell in row.split('|')[1:-1]:
ctyp = 'd'
if re.search(r'^_', cell):
ctyp = "h"
cmtch = re.search(r'^(_?%s%s%s\. )(.*)' % (self.s, self.a, self.c), cell)
if cmtch:
catts = self.pba(cmtch.group(1), 'td')
cell = cmtch.group(2)
else:
catts = ''
cell = self.graf(self.span(cell))
cells.append('\t\t\t<t%s%s>%s</t%s>' % (ctyp, catts, cell, ctyp))
rows.append("\t\t<tr%s>\n%s\n\t\t</tr>" % (ratts, '\n'.join(cells)))
cells = []
catts = None
return "\t<table%s>\n%s\n\t</table>\n\n" % (tatts, '\n'.join(rows))
def lists(self, text):
"""
>>> t = Textile()
>>> t.lists("* one\\n* two\\n* three")
'\\t<ul>\\n\\t\\t<li>one</li>\\n\\t\\t<li>two</li>\\n\\t\\t<li>three</li>\\n\\t</ul>'
"""
pattern = re.compile(r'^([#*]+%s .*)$(?![^#*])' % self.c, re.U|re.M|re.S)
return pattern.sub(self.fList, text)
def fList(self, match):
text = match.group(0).split("\n")
result = []
lists = []
for i, line in enumerate(text):
try:
nextline = text[i+1]
except IndexError:
nextline = ''
m = re.search(r"^([#*]+)(%s%s) (.*)$" % (self.a, self.c), line, re.S)
if m:
tl, atts, content = m.groups()
nl = ''
nm = re.search(r'^([#*]+)\s.*', nextline)
if nm:
nl = nm.group(1)
if tl not in lists:
lists.append(tl)
atts = self.pba(atts)
line = "\t<%sl%s>\n\t\t<li>%s" % (self.lT(tl), atts, self.graf(content))
else:
line = "\t\t<li>" + self.graf(content)
if len(nl) <= len(tl):
line = line + "</li>"
for k in reversed(lists):
if len(k) > len(nl):
line = line + "\n\t</%sl>" % self.lT(k)
if len(k) > 1:
line = line + "</li>"
lists.remove(k)
result.append(line)
return "\n".join(result)
def lT(self, input):
if re.search(r'^#+', input):
return 'o'
else:
return 'u'
def doPBr(self, in_):
return re.compile(r'<(p)([^>]*?)>(.*)(</\1>)', re.S).sub(self.doBr, in_)
def doBr(self, match):
if self.html_type == 'html':
content = re.sub(r'(.+)(?:(?<!<br>)|(?<!<br />))\n(?![#*\s|])', '\\1<br>', match.group(3))
else:
content = re.sub(r'(.+)(?:(?<!<br>)|(?<!<br />))\n(?![#*\s|])', '\\1<br />', match.group(3))
return '<%s%s>%s%s' % (match.group(1), match.group(2), content, match.group(4))
def block(self, text, head_offset = 0):
"""
>>> t = Textile()
>>> t.block('h1. foobar baby')
'\\t<h1>foobar baby</h1>'
"""
if not self.lite:
tre = '|'.join(self.btag)
else:
tre = '|'.join(self.btag_lite)
text = text.split('\n\n')
tag = 'p'
atts = cite = graf = ext = c1 = ''
out = []
anon = False
for line in text:
pattern = r'^(%s)(%s%s)\.(\.?)(?::(\S+))? (.*)$' % (tre, self.a, self.c)
match = re.search(pattern, line, re.S)
if match:
if ext:
out.append(out.pop() + c1)
tag, atts, ext, cite, graf = match.groups()
h_match = re.search(r'h([1-6])', tag)
if h_match:
head_level, = h_match.groups()
tag = 'h%i' % max(1,
min(int(head_level) + head_offset,
6))
o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext,
cite, graf)
# leave off c1 if this block is extended,
# we'll close it at the start of the next block
if ext:
line = "%s%s%s%s" % (o1, o2, content, c2)
else:
line = "%s%s%s%s%s" % (o1, o2, content, c2, c1)
else:
anon = True
if ext or not re.search(r'^\s', line):
o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext,
cite, line)
# skip $o1/$c1 because this is part of a continuing
# extended block
if tag == 'p' and not self.hasRawText(content):
line = content
else:
line = "%s%s%s" % (o2, content, c2)
else:
line = self.graf(line)
line = self.doPBr(line)
if self.html_type == 'xhtml':
line = re.sub(r'<br>', '<br />', line)
if ext and anon:
out.append(out.pop() + "\n" + line)
else:
out.append(line)
if not ext:
tag = 'p'
atts = ''
cite = ''
graf = ''
if ext:
out.append(out.pop() + c1)
return '\n\n'.join(out)
def fBlock(self, tag, atts, ext, cite, content):
"""
>>> t = Textile()
>>> t.fBlock("bq", "", None, "", "Hello BlockQuote")
('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
>>> t.fBlock("bq", "", None, "http://google.com", "Hello BlockQuote")
('\\t<blockquote cite="http://google.com">\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
>>> t.fBlock("bc", "", None, "", 'printf "Hello, World";') # doctest: +ELLIPSIS
('<pre>', '<code>', ..., '</code>', '</pre>')
>>> t.fBlock("h1", "", None, "", "foobar")
('', '\\t<h1>', 'foobar', '</h1>', '')
"""
atts = self.pba(atts)
o1 = o2 = c2 = c1 = ''
m = re.search(r'fn(\d+)', tag)
if m:
tag = 'p'
if m.group(1) in self.fn:
fnid = self.fn[m.group(1)]
else:
fnid = m.group(1)
atts = atts + ' id="fn%s"' % fnid
if atts.find('class=') < 0:
atts = atts + ' class="footnote"'
content = ('<sup>%s</sup>' % m.group(1)) + content
if tag == 'bq':
cite = self.checkRefs(cite)
if cite:
cite = ' cite="%s"' % cite
else:
cite = ''
o1 = "\t<blockquote%s%s>\n" % (cite, atts)
o2 = "\t\t<p%s>" % atts
c2 = "</p>"
c1 = "\n\t</blockquote>"
elif tag == 'bc':
o1 = "<pre%s>" % atts
o2 = "<code%s>" % atts
c2 = "</code>"
c1 = "</pre>"
content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
elif tag == 'notextile':
content = self.shelve(content)
o1 = o2 = ''
c1 = c2 = ''
elif tag == 'pre':
content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
o1 = "<pre%s>" % atts
o2 = c2 = ''
c1 = '</pre>'
else:
o2 = "\t<%s%s>" % (tag, atts)
c2 = "</%s>" % tag
content = self.graf(content)
return o1, o2, content, c2, c1
def footnoteRef(self, text):
"""
>>> t = Textile()
>>> t.footnoteRef('foo[1] ') # doctest: +ELLIPSIS
'foo<sup class="footnote"><a href="#fn...">1</a></sup> '
"""
return re.sub(r'\b\[([0-9]+)\](\s)?', self.footnoteID, text)
def footnoteID(self, match):
id, t = match.groups()
if id not in self.fn:
self.fn[id] = str(uuid.uuid4())
fnid = self.fn[id]
if not t:
t = ''
return '<sup class="footnote"><a href="#fn%s">%s</a></sup>%s' % (fnid, id, t)
def glyphs(self, text):
"""
>>> t = Textile()
>>> t.glyphs("apostrophe's")
'apostrophe’s'
>>> t.glyphs("back in '88")
'back in ’88'
>>> t.glyphs('foo ...')
'foo …'
>>> t.glyphs('--')
'—'
>>> t.glyphs('FooBar[tm]')
'FooBar™'
>>> t.glyphs("<p><cite>Cat's Cradle</cite> by Vonnegut</p>")
'<p><cite>Cat’s Cradle</cite> by Vonnegut</p>'
"""
# fix: hackish
text = re.sub(r'"\Z', '\" ', text)
result = []
for line in re.compile(r'(<.*?>)', re.U).split(text):
if not re.search(r'<.*>', line):
rules = []
if re.search(r'{.+?}', line):
rules = self.macro_defaults + self.glyph_defaults
else:
rules = self.glyph_defaults
for s, r in rules:
line = s.sub(r, line)
result.append(line)
return ''.join(result)
def macros_only(self, text):
# fix: hackish
text = re.sub(r'"\Z', '\" ', text)
result = []
for line in re.compile(r'(<.*?>)', re.U).split(text):
if not re.search(r'<.*>', line):
rules = []
if re.search(r'{.+?}', line):
rules = self.macro_defaults
for s, r in rules:
line = s.sub(r, line)
result.append(line)
return ''.join(result)
def vAlign(self, input):
d = {'^':'top', '-':'middle', '~':'bottom'}
return d.get(input, '')
def hAlign(self, input):
d = {'<':'left', '=':'center', '>':'right', '<>': 'justify'}
return d.get(input, '')
def getRefs(self, text):
"""
what is this for?
"""
pattern = re.compile(r'(?:(?<=^)|(?<=\s))\[(.+)\]((?:http(?:s?):\/\/|\/)\S+)(?=\s|$)', re.U)
text = pattern.sub(self.refs, text)
return text
def refs(self, match):
flag, url = match.groups()
self.urlrefs[flag] = url
return ''
def checkRefs(self, url):
return self.urlrefs.get(url, url)
def isRelURL(self, url):
"""
Identify relative urls.
>>> t = Textile()
>>> t.isRelURL("http://www.google.com/")
False
>>> t.isRelURL("/foo")
True
"""
(scheme, netloc) = urlparse(url)[0:2]
return not scheme and not netloc
def relURL(self, url):
scheme = urlparse(url)[0]
if self.restricted and scheme and scheme not in self.url_schemes:
return '#'
return url
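# Illustrative sketch (not in the original): in restricted mode, relURL
# neutralises URLs whose scheme is outside url_schemes while leaving
# relative links untouched:
#
#   >>> Textile(restricted=True).relURL('javascript:alert(1)')
#   '#'
#   >>> Textile(restricted=True).relURL('/foo')
#   '/foo'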
def shelve(self, text):
id = str(uuid.uuid4()) + 'c'
self.shelf[id] = text
return id
def retrieve(self, text):
"""
>>> t = Textile()
>>> id = t.shelve("foobar")
>>> t.retrieve(id)
'foobar'
"""
while True:
old = text
for k, v in self.shelf.items():
text = text.replace(k, v)
if text == old:
break
return text
def encode_html(self, text, quotes=True):
a = (
('&', '&#38;'),
('<', '&#60;'),
('>', '&#62;')
)
if quotes:
a = a + (
("'", '&#39;'),
('"', '&#34;')
)
for k, v in a:
text = text.replace(k, v)
return text
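# Illustrative sketch (not in the original): with the table above,
# encode_html maps the five XML-special characters to numeric entities;
# ampersands are replaced first, so the entities themselves are not
# double-escaped:
#
#   >>> Textile().encode_html('5 > 3 & "quoted"')
#   '5 &#62; 3 &#38; &#34;quoted&#34;'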
def graf(self, text):
if not self.lite:
text = self.noTextile(text)
text = self.code(text)
text = self.links(text)
if not self.noimage:
text = self.image(text)
if not self.lite:
text = self.lists(text)
text = self.table(text)
text = self.span(text)
text = self.footnoteRef(text)
text = self.glyphs(text)
return text.rstrip('\n')
def links(self, text):
"""
>>> t = Textile()
>>> t.links('fooobar "Google":http://google.com/foobar/ and hello world "flickr":http://flickr.com/photos/jsamsa/ ') # doctest: +ELLIPSIS
'fooobar ... and hello world ...'
"""
text = self.macros_only(text)
punct = '!"#$%&\'*+,-./:;=?@\\^_`|~'
pattern = r'''
(?P<pre> [\s\[{(]|[%s] )?
" # start
(?P<atts> %s )
(?P<text> [^"]+? )
\s?
(?: \(([^)]+?)\)(?=") )? # $title
":
(?P<url> (?:ftp|https?)? (?: :// )? [-A-Za-z0-9+&@#/?=~_()|!:,.;]*[-A-Za-z0-9+&@#/=~_()|] )
(?P<post> [^\w\/;]*? )
(?=<|\s|$)
''' % (re.escape(punct), self.c)
text = re.compile(pattern, re.X).sub(self.fLink, text)
return text
def fLink(self, match):
pre, atts, text, title, url, post = match.groups()
if pre == None:
pre = ''
# assume ) at the end of the url is not actually part of the url
# unless the url also contains a (
if url.endswith(')') and not url.find('(') > -1:
post = url[-1] + post
url = url[:-1]
url = self.checkRefs(url)
atts = self.pba(atts)
if title:
atts = atts + ' title="%s"' % self.encode_html(title)
if not self.noimage:
text = self.image(text)
text = self.span(text)
text = self.glyphs(text)
url = self.relURL(url)
out = '<a href="%s"%s%s>%s</a>' % (self.encode_html(url), atts, self.rel, text)
out = self.shelve(out)
return ''.join([pre, out, post])
def span(self, text):
"""
>>> t = Textile()
>>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye")
'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
"""
qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^')
pnct = ".,\"'?!;:"
for qtag in qtags:
pattern = re.compile(r"""
(?:^|(?<=[\s>%(pnct)s\(])|\[|([\]}]))
(%(qtag)s)(?!%(qtag)s)
(%(c)s)
(?::(\S+))?
([^\s%(qtag)s]+|\S[^%(qtag)s\n]*[^\s%(qtag)s\n])
([%(pnct)s]*)
%(qtag)s
(?:$|([\]}])|(?=%(selfpnct)s{1,2}|\s))
""" % {'qtag':qtag, 'c':self.c, 'pnct':pnct,
'selfpnct':self.pnct}, re.X)
text = pattern.sub(self.fSpan, text)
return text
def fSpan(self, match):
_, tag, atts, cite, content, end, _ = match.groups()
qtags = {
'*': 'strong',
'**': 'b',
'??': 'cite',
'_' : 'em',
'__': 'i',
'-' : 'del',
'%' : 'span',
'+' : 'ins',
'~' : 'sub',
'^' : 'sup'
}
tag = qtags[tag]
atts = self.pba(atts)
if cite:
atts = atts + ' cite="%s"' % cite
content = self.span(content)
out = "<%s%s>%s%s</%s>" % (tag, atts, content, end, tag)
return out
def image(self, text):
"""
>>> t = Textile()
>>> t.image('!/imgs/myphoto.jpg!:http://jsamsa.com')
'<a href="http://jsamsa.com"><img src="/imgs/myphoto.jpg" alt="" /></a>'
"""
pattern = re.compile(r"""
(?:[\[{])? # pre
\! # opening !
(%s) # optional style,class atts
(?:\. )? # optional dot-space
([^\s(!]+) # presume this is the src
\s? # optional space
(?:\(([^\)]+)\))? # optional title
\! # closing
(?::(\S+))? # optional href
(?:[\]}]|(?=\s|$)) # lookahead: space or end of string
""" % self.c, re.U|re.X)
return pattern.sub(self.fImage, text)
def fImage(self, match):
# (None, '', '/imgs/myphoto.jpg', None, None)
atts, url, title, href = match.groups()
atts = self.pba(atts)
if title:
atts = atts + ' title="%s" alt="%s"' % (title, title)
else:
atts = atts + ' alt=""'
if not self.isRelURL(url) and self.get_sizes:
size = getimagesize(url)
if (size):
atts += " %s" % size
if href:
href = self.checkRefs(href)
url = self.checkRefs(url)
url = self.relURL(url)
out = []
if href:
out.append('<a href="%s" class="img">' % href)
if self.html_type == 'html':
out.append('<img src="%s"%s>' % (url, atts))
else:
out.append('<img src="%s"%s />' % (url, atts))
if href:
out.append('</a>')
return ''.join(out)
def code(self, text):
text = self.doSpecial(text, '<code>', '</code>', self.fCode)
text = self.doSpecial(text, '@', '@', self.fCode)
text = self.doSpecial(text, '<pre>', '</pre>', self.fPre)
return text
def fCode(self, match):
before, text, after = match.groups()
if after == None:
after = ''
# text needs to be escaped
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, self.shelve('<code>%s</code>' % text), after])
def fPre(self, match):
before, text, after = match.groups()
if after == None:
after = ''
# text needs to be escaped
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, '<pre>', self.shelve(text), '</pre>', after])
def doSpecial(self, text, start, end, method=None):
if method == None:
method = self.fSpecial
pattern = re.compile(r'(^|\s|[\[({>])%s(.*?)%s(\s|$|[\])}])?' % (re.escape(start), re.escape(end)), re.M|re.S)
return pattern.sub(method, text)
def fSpecial(self, match):
"""
special blocks like notextile or code
"""
before, text, after = match.groups()
if after == None:
after = ''
return ''.join([before, self.shelve(self.encode_html(text)), after])
def noTextile(self, text):
text = self.doSpecial(text, '<notextile>', '</notextile>', self.fTextile)
return self.doSpecial(text, '==', '==', self.fTextile)
def fTextile(self, match):
before, notextile, after = match.groups()
if after == None:
after = ''
return ''.join([before, self.shelve(notextile), after])
def textile(text, head_offset=0, html_type='xhtml', encoding=None, output=None):
"""
this function takes additional parameters:
head_offset - offset to apply to heading levels (default: 0)
html_type - 'xhtml' or 'html' style tags (default: 'xhtml')
"""
return Textile().textile(text, head_offset=head_offset,
html_type=html_type)
def textile_restricted(text, lite=True, noimage=True, html_type='xhtml'):
"""
Restricted version of Textile designed for weblog comments and other
untrusted input.
Raw HTML is escaped.
Style attributes are disabled.
rel='nofollow' is added to external links.
When lite=True is set (the default):
Block tags are restricted to p, bq, and bc.
Lists and tables are disabled.
When noimage=True is set (the default):
Image tags are disabled.
"""
return Textile(restricted=True, lite=lite,
noimage=noimage).textile(text, rel='nofollow',
html_type=html_type)
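# Illustrative usage sketch (not part of the original file): the two
# module-level helpers above are the intended entry points, e.g.:
#
#   html = textile('h1. Hello\n\nSome *strong* text')
#   safe = textile_restricted('"a link":http://example.com')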
| gpl-3.0 |
kaichogami/scikit-learn | sklearn/utils/multiclass.py | 40 | 12966 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
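# Hedged usage sketch (hypothetical estimator code, not part of this
# module): a typical partial_fit implementation calls the helper once per
# batch; only the very first call (with `classes` given) returns True,
# which is the moment clf.classes_ has just been set:
#
#   def partial_fit(self, X, y, classes=None):
#       if _check_partial_fit_first_call(self, classes):
#           self._initialize_state()   # hypothetical first-call setup
#       ...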
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
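# Illustrative sketch (not in the original): for a dense 2-output target,
# class_distribution returns per-column classes, class counts and priors:
#
#   >>> import numpy as np
#   >>> y = np.array([[1, 0], [2, 0], [2, 1]])
#   >>> classes, n_classes, class_prior = class_distribution(y)
#   >>> # classes     -> [array([1, 2]), array([0, 1])]
#   >>> # n_classes   -> [2, 2]
#   >>> # class_prior -> [array([ 0.333...,  0.666...]), array([ 0.666...,  0.333...])]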
| bsd-3-clause |
h2oai/h2o | py/testdir_single_jvm/test_GLM2_gaussian_rand2.py | 9 | 1837 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
def define_params():
paramDict = {
'standardize': [None, 0,1],
'beta_epsilon': [None, 0.0001],
'ignored_cols': [None, 0, 1, 15, 33, 34],
'family': ['gaussian'],
'n_folds': [2, 3, 4, 9],
'lambda': [None, 0, 1e-8, 1e-4],
'alpha': [None, 0, 0.5, 0.75],
'max_iter': [None, 10],
}
return paramDict
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_gaussian_rand2(self):
csvPathname = 'standard/covtype.data'
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put')
paramDict = define_params()
for trial in range(20):
# params is mutable. This is default.
params = {
'response': 54,
'n_folds': 3,
'family': "gaussian",
'alpha': 0.5,
'lambda': 1e-4,
'max_iter': 30
}
colX = h2o_glm.pickRandGlmParams(paramDict, params)
kwargs = params.copy()
start = time.time()
glm = h2o_cmd.runGLM(timeoutSecs=300, parseResult=parseResult, **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
ONEcampaign/humanitarian-data-service | displacement_tracker_data.py | 1 | 27157 | import requests
import pandas as pd
import os.path
import resources.constants
import json
from pandas.io.json import json_normalize
from utils.data_utils import get_ordinal_number
"""
This script aggregates data from multiple endpoints and returns a single .json file containing all data
used in the displacement tracker project.
Scheduling this script would mean that the /displacement_tracker endpoint always returned the latest data
contained within the Humanitarian Data Service API.
"""
# For development
#ROOT = 'http://localhost:5000'
# For live
ROOT = 'http://ec2-34-200-18-111.compute-1.amazonaws.com'
# Set year for country-level funding data
FUNDING_YEAR = 2016
# Define all endpoints
URL_POPULATIONS_REFUGEELIKE_ASYLUM = '/populations/refugeelike/asylum/index'
URL_POPULATIONS_REFUGEELIKE_ORIGIN = '/populations/refugeelike/origin/index'
URL_INDICATORS_GNI = '/indicators/gni/index'
URL_PLANS_PROGRESS = '/funding/plans/progress/index'
URL_POPULATION = '/populations/totals/index'
URL_FRAGILE_STATE = '/fragility/fragile-state-index/index'
URL_NEEDS = '/needs/plans/index'
URL_FUNDING_DEST_COUNTRY = '/funding/countries/destination/index/{}'.format(FUNDING_YEAR)
URL_FUNDING_DEST_DONORS = '/funding/countries/donors/index'
# Define path for raw country names data
country_names_path = os.path.join(resources.constants.EXAMPLE_RAW_DATA_PATH, 'UNSD Methodology.csv')
# Define path for relatable geography populations data
relatable_population_path = os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, '2017_relatable_population_rankings.csv')
# Define path for stories of displacement
displacement_stories_path = os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'stories_of_displacement_links.csv')
# Create a blank dictionary to store metadata for each field
metadata_dict = {}
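# Hedged sketch of the response shape this script assumes for every API
# endpoint above (inferred from the parsing code in merge_data below, not
# from API documentation):
#
#   {
#       "data": {"<ISO-alpha3 code>": {"<field>": <value>, ...}, ...},
#       "metadata": {"source_data": "...", ...}
#   }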
def merge_data(
funding_year = FUNDING_YEAR,
country_names_path=country_names_path,
relatable_population_path=relatable_population_path,
displacement_stories_path=displacement_stories_path,
url_populations_refugeelike_asylum=(ROOT + URL_POPULATIONS_REFUGEELIKE_ASYLUM),
url_populations_refugeelike_origin=(ROOT + URL_POPULATIONS_REFUGEELIKE_ORIGIN),
url_indicators_gni=(ROOT + URL_INDICATORS_GNI),
url_plans_progress=(ROOT + URL_PLANS_PROGRESS),
url_population=(ROOT + URL_POPULATION),
url_fragile_state=(ROOT + URL_FRAGILE_STATE),
url_needs=(ROOT + URL_NEEDS),
url_funding_dest_country=(ROOT + URL_FUNDING_DEST_COUNTRY),
url_funding_dest_donors=(ROOT + URL_FUNDING_DEST_DONORS)
):
#################### COUNTRY NAMES ####################
# Get the data from .csv
df_country_names = pd.read_csv(country_names_path, encoding='utf-8')
# Select relevant fields
df_country_names = df_country_names[[
'Country or Area',
'ISO-alpha3 Code'
]]
# Add Taiwan
df_country_names.loc[-1] = ["Taiwan", "TWN"]
# Drop null values
df_country_names = df_country_names.dropna()
# Set country code to be the index
df_country_names = df_country_names.set_index('ISO-alpha3 Code')
# Rename fields
df_country_names.rename(columns={'Country or Area': 'Country'}, inplace=True)
#################### DISPLACEMENT STORIES ####################
# Get the data from .csv
df_displacement_stories = pd.read_csv(displacement_stories_path, encoding='utf-8')
# Set country code to be the index
df_displacement_stories = df_displacement_stories.set_index('countryCode')
# Select relevant fields
df_displacement_stories = df_displacement_stories[[
'storyTitle', 'storySource',
'storyTagLine', 'storyURL'
]]
# Drop null values
df_displacement_stories = df_displacement_stories.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_displacement_stories.columns:
metadata_dict[column] = {}
#################### POPULATIONS ####################
# Get the data from the API
population_data = requests.get(url_population).json()
# Extract metadata
if 'metadata' in population_data:
population_metadata = population_data['metadata']
else:
population_metadata = {}
# Build dataframe
df_population = pd.DataFrame(population_data['data']).T
# Select relevant fields
df_population = df_population[[
'PopTotal'
]]
# Rename fields
df_population.rename(columns={'PopTotal': 'Population'}, inplace=True)
# Drop null values
df_population = df_population.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_population.columns:
metadata_dict[column] = population_metadata
#################### FRAGILE STATE ####################
# Get the data from the API
fragile_state_data = requests.get(url_fragile_state).json()
# Extract metadata
if 'metadata' in fragile_state_data:
fragile_state_metadata = fragile_state_data['metadata']
else:
fragile_state_metadata = {}
# Build a dataframe
df_fragile_state = pd.DataFrame(fragile_state_data['data']).T
# Select relevant fields
df_fragile_state = df_fragile_state[[
'Total', 'Rank'
]]
# Rename fields
df_fragile_state.rename(columns={'Total': 'Fragile State Index Score',
'Rank': 'Fragile State Index Rank'}, inplace=True)
# Drop null values
df_fragile_state = df_fragile_state.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_fragile_state.columns:
metadata_dict[column] = fragile_state_metadata
#################### POPULATIONS_REFUGEELIKE_ASYLUM ####################
# Get the data from the API
populations_refugeelike_asylum_data = requests.get(url_populations_refugeelike_asylum).json()
# Extract metadata
if 'metadata' in populations_refugeelike_asylum_data:
populations_refugeelike_asylum_metadata = populations_refugeelike_asylum_data['metadata']
else:
populations_refugeelike_asylum_metadata = {}
# Build a dataframe
df_populations_refugeelike_asylum = pd.DataFrame(populations_refugeelike_asylum_data['data']).T
# Select relevant fields
df_populations_refugeelike_asylum = df_populations_refugeelike_asylum[[
'Total population of concern', 'Total Refugee and people in refugee-like situations',
'IDPs protected/assisted by UNHCR, incl. people in IDP-like situations','Asylum-seekers'
]]
# Rename fields
df_populations_refugeelike_asylum.rename(columns={
'IDPs protected/assisted by UNHCR, incl. people in IDP-like situations': 'IDPs protected/assisted by UNHCR',
'Asylum-seekers': 'Asylum-seekers (asylum)'
}, inplace=True)
# Add field to rank total total population of concern
df_populations_refugeelike_asylum['Rank of total population of concern'] = df_populations_refugeelike_asylum[
'Total population of concern'].rank(ascending=False, method='min').astype(int)
# Add field to add refugees and asylum-seekers
df_populations_refugeelike_asylum['Total refugees and asylum-seekers (asylum)'] = df_populations_refugeelike_asylum[
'Total Refugee and people in refugee-like situations'] + df_populations_refugeelike_asylum['Asylum-seekers (asylum)']
# Drop null values
df_populations_refugeelike_asylum = df_populations_refugeelike_asylum.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_populations_refugeelike_asylum.columns:
metadata_dict[column] = populations_refugeelike_asylum_metadata
#################### POPULATIONS_REFUGEELIKE_ORIGIN ####################
# Get the data from the API
populations_refugeelike_origin_data = requests.get(url_populations_refugeelike_origin).json()
# Extract metadata
if 'metadata' in populations_refugeelike_origin_data:
populations_refugeelike_origin_metadata = populations_refugeelike_origin_data['metadata']
else:
populations_refugeelike_origin_metadata = {}
# Build a dataframe
df_populations_refugeelike_origin = pd.DataFrame(populations_refugeelike_origin_data['data']).T
# Select relevant fields
df_populations_refugeelike_origin = df_populations_refugeelike_origin[[
'Total Refugee and people in refugee-like situations', 'Asylum-seekers'
]]
# Rename fields
df_populations_refugeelike_origin.rename(columns={
'Total Refugee and people in refugee-like situations': 'Total refugees who have fled from country',
'Asylum-seekers': 'Asylum-seekers (origin)'
}, inplace=True)
# Add field to add refugees and asylum-seekers
df_populations_refugeelike_origin['Total refugees and asylum-seekers (origin)'] = df_populations_refugeelike_origin[
'Total refugees who have fled from country'] + df_populations_refugeelike_origin['Asylum-seekers (origin)']
# Drop null values
df_populations_refugeelike_origin = df_populations_refugeelike_origin.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_populations_refugeelike_origin.columns:
metadata_dict[column] = populations_refugeelike_origin_metadata
#################### INDICATORS GNI ####################
# Get the data from the API
indicators_gni_data = requests.get(url_indicators_gni).json()
# Extract metadata
if 'metadata' in indicators_gni_data:
indicators_gni_metadata = indicators_gni_data['metadata']
else:
indicators_gni_metadata = {}
# Build a dataframe
df_indicators_gni = pd.DataFrame(indicators_gni_data['data']).T
# Select relevant fields
df_indicators_gni = df_indicators_gni[[
'2015'
]]
# Rename fields
df_indicators_gni.rename(columns={'2015': 'GDP Per Capita'}, inplace=True)
# Drop null values
df_indicators_gni = df_indicators_gni.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_indicators_gni.columns:
metadata_dict[column] = indicators_gni_metadata
#################### PLANS PROGRESS ####################
# Get the data from the API
plans_progress_data = requests.get(url_plans_progress).json()
# Extract metadata
if 'metadata' in plans_progress_data:
plans_progress_metadata = plans_progress_data['metadata']
else:
plans_progress_metadata = {}
# Build a dataframe
df_plans_progress = pd.DataFrame(plans_progress_data['data']).T
# Select relevant fields
df_plans_progress = df_plans_progress[[
'appealFunded', 'revisedRequirements', 'neededFunding'
]]
# Rename fields
df_plans_progress.rename(columns={'appealFunded': 'Appeal funds committed to date',
'revisedRequirements': 'Appeal funds requested',
'neededFunding': 'Appeal funds still needed'}, inplace=True)
df_plans_progress['Appeal percent funded'] = df_plans_progress['Appeal funds committed to date']/df_plans_progress['Appeal funds requested']
# Drop null values
df_plans_progress = df_plans_progress.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_plans_progress.columns:
metadata_dict[column] = plans_progress_metadata
# Add an FTS data as-of date so it can be included in the .csv data dump
df_plans_progress['FTS funding data as-of date'] = plans_progress_data['metadata']['source_data']
######## FUNDING BY DESTINATION COUNTRY ############
# Get the data from the API
funding_dest_country_data = requests.get(url_funding_dest_country).json()
# Extract metadata
if 'metadata' in funding_dest_country_data:
funding_dest_country_metadata = funding_dest_country_data['metadata']
else:
funding_dest_country_metadata = {}
# Build a dataframe
df_funding_dest_country = pd.DataFrame(funding_dest_country_data['data']).T
# Select relevant fields
df_funding_dest_country = df_funding_dest_country[[
'totalFunding'
]]
# Keep only records where totalFunding > 0
df_funding_dest_country = df_funding_dest_country[df_funding_dest_country['totalFunding'] > 0]
# Rename fields
df_funding_dest_country.rename(columns={'totalFunding': 'Humanitarian aid received'},
inplace=True)
# Add field to rank humanitarian aid received
df_funding_dest_country['Rank of humanitarian aid received'] = df_funding_dest_country[
'Humanitarian aid received'].rank(ascending=False, method='min').astype(int)
# Drop null values
df_funding_dest_country = df_funding_dest_country.dropna()
# Add metadata for each field to overall metadata dictionary
for column in df_funding_dest_country.columns:
metadata_dict[column] = funding_dest_country_metadata
################## TOP 5 DONORS TO EACH DESTINATION COUNTRY ###################
# Get the data from the API
funding_dest_donors_data = requests.get(url_funding_dest_donors).json()
# Extract metadata
if 'metadata' in funding_dest_donors_data:
funding_dest_donors_metadata = funding_dest_donors_data['metadata']
else:
funding_dest_donors_metadata = {}
# Build a dataframe
df_funding_dest_donors = json_normalize(funding_dest_donors_data['data']).T
#df_funding_dest_donors = pd.DataFrame(funding_dest_donors_data['data']).T
df_funding_dest_donors.columns = ['Top 5 Donors']
# Add metadata for each field to overall metadata dictionary
for column in df_funding_dest_donors.columns:
metadata_dict[column] = funding_dest_donors_metadata
#################### NEEDS ####################
# Get the data from the API
needs_data = requests.get(url_needs).json()
# Extract metadata
if 'metadata' in needs_data:
needs_metadata = needs_data['metadata']
else:
needs_metadata = {}
# Build a dataframe
df_needs = pd.DataFrame(needs_data['data']).T
# Exclude rows where country code is missing
df_needs = df_needs.drop('null')
# Select relevant fields
df_needs = df_needs[[
'inNeedTotal', 'inNeedHealth', 'inNeedEducation',
'inNeedFoodSecurity', 'inNeedProtection', 'sourceURL',
'inNeedShelter-CCCM-NFI', 'inNeedWASH', 'sourceType'
]]
# Rename fields
df_needs.rename(columns={'inNeedTotal': 'Total people in need',
'inNeedHealth': 'People in need of health support',
'inNeedEducation': 'Children in need of education',
'inNeedFoodSecurity': 'People who are food insecure',
'inNeedProtection': 'People in need of protection',
'inNeedShelter-CCCM-NFI': 'People in need of shelter',
                         'inNeedWASH': 'People in need of water, sanitation & hygiene',
'sourceURL': 'Source of needs data',
'sourceType': 'Source type of needs data'
}, inplace=True)
# Add metadata for each field to overall metadata dictionary
for column in df_needs.columns:
metadata_dict[column] = needs_metadata
######## FIND PLACES WITH SIMILAR POPULATIONS TO PEOPLE IN NEED ########
# Get the relateable populations data from .csv
df_relatable_populations = pd.read_csv(relatable_population_path)
df_relatable_populations['Population'] = df_relatable_populations[[
'Population - World Bank (2015)','Population - UNFPA (2016)'
]].max(axis=1)
df_relatable_populations = df_relatable_populations[['City, State, Country','Population']].dropna()
def find_nearest_place_population(reference_value):
if reference_value:
nearest_row = df_relatable_populations.iloc[(df_relatable_populations['Population']- reference_value).abs().argsort()[0]]
nearest_population = nearest_row['Population']
else:
nearest_population = 0.00
return nearest_population
def find_nearest_place(reference_value):
if reference_value:
nearest_row = df_relatable_populations.iloc[(df_relatable_populations['Population']- reference_value).abs().argsort()[0]]
nearest_place = nearest_row['City, State, Country']
else:
nearest_place = ''
return nearest_place
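# Illustrative behavior of the two helpers above (example row assumed, not
# taken from the source data): if df_relatable_populations contained
# ('Barcelona, Catalonia, Spain', 1600000.0), then find_nearest_place(1550000)
# would return 'Barcelona, Catalonia, Spain' and
# find_nearest_place_population(1550000) would return 1600000.0 -- both pick
# the row whose Population is closest in absolute difference.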
df_needs['Place with similar population as people in need'] = df_needs['Total people in need'].apply(
find_nearest_place)
# Add metadata
metadata_dict['Place with similar population as people in need'] = {}
df_needs['Population of place with similar population'] = df_needs['Total people in need'].apply(
find_nearest_place_population)
# Add metadata
metadata_dict['Population of place with similar population'] = {}
#################### SAMPLE CLUSTERS ####################
# Build a dataframe
# df_clusters = pd.read_json('sample_clusters.json').T
# df_clusters = df_clusters[['clusters']]
################# COMBINE ALL DATA ##############
# Make a list of all dataframes
all_dataframes = [
df_country_names,
df_populations_refugeelike_asylum,
df_indicators_gni,
df_plans_progress,
df_population,
df_fragile_state,
df_needs,
df_funding_dest_country,
df_funding_dest_donors,
df_displacement_stories,
df_populations_refugeelike_origin
# df_clusters
]
df_final = pd.concat(all_dataframes, axis=1)
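# Note: axis=1 concatenation aligns the dataframes on their shared index --
# the country code, as the CountryCode index label used for the CSV dump below
# confirms -- so each row of df_final is one country with columns drawn from
# every source frame.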
# Add calculation for displaced people as a ratio of total population
df_final['Population of concern per 1000 population'] = (df_final['Total population of concern'] / df_final[
'Population'])*1000
# And metadata
metadata_dict['Population of concern per 1000 population'] = {}
metadata_dict['Population of concern per 1000 population']['Calculation'] = '(Total population of concern / Population) * 1000'
# Add calculation for displaced people per million GDP
df_final['Population of concern per million GDP'] = ((df_final['Total population of concern'] * 1000000) / (df_final[
'GDP Per Capita'] * df_final['Population']))
# And metadata
metadata_dict['Population of concern per million GDP'] = {}
metadata_dict['Population of concern per million GDP']['Calculation'] = '(Total population of concern * 1000000) / (GDP Per Capita * Population)'
# Add field to specify whether country has current humanitarian appeal in FTS
df_final['Country has current appeal'] = df_final['Appeal funds requested'].notnull()
# And metadata
metadata_dict['Country has current appeal'] = {}
metadata_dict['Country has current appeal']['Calculation'] = 'Is Appeal funds requested not null'
# Make the ranked variables ordinal
def get_ordinal_number(value):
try:
value = int(value)
except ValueError:
return value
if value % 100 // 10 != 1:
if value % 10 == 1:
ordval = u"%d%s" % (value, "st")
elif value % 10 == 2:
ordval = u"%d%s" % (value, "nd")
elif value % 10 == 3:
ordval = u"%d%s" % (value, "rd")
else:
ordval = u"%d%s" % (value, "th")
else:
ordval = u"%d%s" % (value, "th")
return ordval
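# Illustrative outputs, derived from the logic above (not from the source):
#   get_ordinal_number(1) -> u'1st', get_ordinal_number(2) -> u'2nd',
#   get_ordinal_number(11) -> u'11th' (teens always take 'th'),
#   get_ordinal_number('') -> '' (non-integral input is returned unchanged).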
df_final['Rank of total population of concern'] = df_final['Rank of total population of concern'].apply(
get_ordinal_number)
df_final['Rank of humanitarian aid received'] = df_final['Rank of humanitarian aid received'].apply(
get_ordinal_number)
################## STRUCTURE DICTIONARY ##################
# Clean up NaN values
df_final = df_final.fillna('')
# Transform dataframe to dictionary
df_as_dict = df_final.to_dict(orient='index')
# Define field names for each strand
strand_01_fields = ['Appeal funds still needed', 'Appeal funds requested', 'Appeal funds committed to date',
'Appeal percent funded', 'Source of needs data', 'Source type of needs data',
'Total people in need', 'Place with similar population as people in need',
'Population of place with similar population']
strand_02_fields = ['Population of concern per 1000 population', 'Fragile State Index Score',
'Total population of concern',
'IDPs protected/assisted by UNHCR',
'GDP Per Capita',
'Total refugees and asylum-seekers (asylum)',
'Total refugees and asylum-seekers (origin)']
strand_03_fields = ['Humanitarian aid received', 'Appeal funds requested', 'Appeal percent funded',
'Rank of total population of concern', 'Rank of humanitarian aid received']
needs_fields = ['People in need of health support','Children in need of education',
'People who are food insecure','People in need of protection','People in need of shelter',
                'People in need of water, sanitation & hygiene']
story_fields = ['storyTitle', 'storySource', 'storyTagLine', 'storyURL']
# For every object, get / group the values by strand
data = {}
for x in df_as_dict.keys():
# Create an empty dict
country_dict = {}
# Populate the dict with those value that don't require nesting
country_dict['Country'] = df_as_dict[x]['Country']
country_dict['Fragile State Index Rank'] = df_as_dict[x]['Fragile State Index Rank']
country_dict['Country has current appeal'] = df_as_dict[x]['Country has current appeal']
# Populate the dict with story fields
story_fields_dict = {}
if df_as_dict[x]['storyURL']:
for field in story_fields:
story_fields_dict[field] = (df_as_dict[x][field])
country_dict['Displacement_story'] = story_fields_dict
# Populate the dict with strand 1 data if the country has a current appeal
strand_01_dict = {}
if df_as_dict[x]['Country has current appeal']:
strand_01_dict['Needs_Data'] = {}
for names_01 in strand_01_fields:
strand_01_dict[names_01] = (df_as_dict[x][names_01])
for name in needs_fields:
if df_as_dict[x][name] != '':
strand_01_dict['Needs_Data'][name] = (df_as_dict[x][name])
country_dict['Strand_01_Needs'] = strand_01_dict
# Populate the dict with strand 2 data
strand_02_dict = {}
for names_02 in strand_02_fields:
strand_02_dict[names_02] = (df_as_dict[x][names_02])
country_dict['Strand_02_People'] = strand_02_dict
# Populate the dict with strand 3 data
strand_03_dict = {}
strand_03_dict['Top 5 donors of humanitarian aid'] = []
for names_03 in strand_03_fields:
strand_03_dict[names_03] = (df_as_dict[x][names_03])
if df_as_dict[x]['Top 5 Donors']:
strand_03_dict['Top 5 donors of humanitarian aid'] = df_as_dict[x]['Top 5 Donors']
country_dict['Strand_03_Aid'] = strand_03_dict
# Add the country dict to the data dict
data[x] = country_dict
# Add World totals
# Create an empty dict
world_dict = {}
# Populate the dict with aggregated strand 1 data
strand_01_dict = {}
strand_01_dict['Needs_Data'] = {}
strand_01_dict['Total people in need'] = df_needs['Total people in need'].sum()
strand_01_dict['Count of current crises with people in need'] = df_needs['Total people in need'].count()
strand_01_dict['Place with similar population as people in need'] = find_nearest_place(
df_needs['Total people in need'].sum()
)
strand_01_dict['Population of place with similar population'] = find_nearest_place_population(
df_needs['Total people in need'].sum()
)
for name in needs_fields:
strand_01_dict['Needs_Data'][name] = df_needs[name].sum()
world_dict['Strand_01_Needs'] = strand_01_dict
# Add the world dict to the data dict
data['WORLD'] = world_dict
# Create the metadata dict
metadata = {}
# Populate the dict with those value that don't require nesting
#metadata['Country'] = metadata_dict['Country']
metadata['Fragile State Index Rank'] = metadata_dict['Fragile State Index Rank']
metadata['Country has current appeal'] = metadata_dict['Country has current appeal']
# Populate the dict with story fields
story_fields_dict = {}
if metadata_dict['storyURL']:
for field in story_fields:
story_fields_dict[field] = (metadata_dict[field])
metadata['Displacement_story'] = story_fields_dict
# Populate the dict with strand 1 data if the country has a current appeal
strand_01_dict = {}
strand_01_dict['Needs_Data'] = {}
for names_01 in strand_01_fields:
strand_01_dict[names_01] = (metadata_dict[names_01])
metadata['Strand_01_Needs'] = strand_01_dict
# Populate the dict with strand 2 data
strand_02_dict = {}
for names_02 in strand_02_fields:
strand_02_dict[names_02] = (metadata_dict[names_02])
metadata['Strand_02_People'] = strand_02_dict
# Populate the dict with strand 3 data
strand_03_dict = {}
strand_03_dict['Top 5 donors of humanitarian aid'] = []
for names_03 in strand_03_fields:
strand_03_dict[names_03] = (metadata_dict[names_03])
if metadata_dict['Top 5 Donors']:
strand_03_dict['Top 5 donors of humanitarian aid'] = metadata_dict['Top 5 Donors']
metadata['Strand_03_Aid'] = strand_03_dict
# At the top level, structure the JSON with 'data' and 'metadata'
final_json = {
'data': data,
'metadata': metadata
}
return final_json, metadata, df_final
def run():
print 'Pulling and merging data'
final_json, metadata, final_csv = merge_data()
print 'Writing Combined JSON file'
with open(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker.json'), 'w') as outfile:
json.dump(final_json, outfile, indent=4, separators=(',', ': '), ensure_ascii=True, sort_keys=True)
print 'Writing Combined JSON metadata file'
with open(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker_metadata.json'), 'w') as outfile:
json.dump(metadata, outfile, indent=4, separators=(',', ': '), ensure_ascii=True, sort_keys=True)
print 'Writing Combined CSV file'
final_csv.to_csv(os.path.join(resources.constants.EXAMPLE_DERIVED_DATA_PATH, 'displacement_tracker.csv'), index_label='CountryCode', encoding='utf-8')
if __name__ == "__main__":
run()
| mit |
anthonyalmarza/trex | tests/test_pipelining.py | 1 | 4322 | # import sys
from twisted.trial import unittest
from twisted.internet import defer
# from twisted.python import log
import trex
from trex import redis
from .mixins import REDIS_HOST, REDIS_PORT
# log.startLogging(sys.stdout)
class InspectableTransport(object):
def __init__(self, transport):
self.original_transport = transport
self.write_history = []
def __getattr__(self, method):
if method == "write":
def write(data, *args, **kwargs):
self.write_history.append(data)
return self.original_transport.write(data, *args, **kwargs)
return write
return getattr(self.original_transport, method)
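# InspectableTransport (above) is a minimal write-recording proxy: only
# "write" is intercepted, so the tests can assert on the exact byte payloads
# handed to the transport, while every other attribute lookup falls through
# to the wrapped original transport.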
class TestRedisConnections(unittest.TestCase):
@defer.inlineCallbacks
def _assert_simple_sets_on_pipeline(self, db):
pipeline = yield db.pipeline()
self.assertTrue(pipeline.pipelining)
# Hook into the transport so we can inspect what is happening
# at the protocol level.
pipeline.transport = InspectableTransport(pipeline.transport)
pipeline.set("trex:test_pipeline", "foo")
pipeline.set("trex:test_pipeline", "bar")
pipeline.set("trex:test_pipeline2", "zip")
yield pipeline.execute_pipeline()
self.assertFalse(pipeline.pipelining)
result = yield db.get("trex:test_pipeline")
self.assertEqual(result, "bar")
result = yield db.get("trex:test_pipeline2")
self.assertEqual(result, "zip")
# Make sure that all SET commands were sent in a single pipelined write.
write_history = pipeline.transport.write_history
lines_in_first_write = write_history[0].split("\n")
sets_in_first_write = sum([1 for w in lines_in_first_write if "SET" in w])
self.assertEqual(sets_in_first_write, 3)
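        # For context (wire-format detail assumed, not asserted by this test):
        # Redis commands travel as multi-bulk blocks such as
        # "*3\r\n$3\r\nSET\r\n...", so each pipelined SET contributes one line
        # containing "SET" to the single transport.write() payload counted
        # above.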
@defer.inlineCallbacks
def _wait_for_lazy_connection(self, db):
# For lazy connections, wait for the internal deferred to indicate
# that the connection is established.
yield db._connected
@defer.inlineCallbacks
def test_Connection(self):
db = yield redis.Connection(REDIS_HOST, REDIS_PORT, reconnect=False)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_ConnectionDB1(self):
db = yield redis.Connection(REDIS_HOST, REDIS_PORT, dbid=1,
reconnect=False)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_ConnectionPool(self):
db = yield redis.ConnectionPool(REDIS_HOST, REDIS_PORT, poolsize=2,
reconnect=False)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_lazyConnection(self):
db = redis.lazyConnection(REDIS_HOST, REDIS_PORT, reconnect=False)
yield self._wait_for_lazy_connection(db)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_lazyConnectionPool(self):
db = redis.lazyConnectionPool(REDIS_HOST, REDIS_PORT, reconnect=False)
yield self._wait_for_lazy_connection(db)
yield self._assert_simple_sets_on_pipeline(db=db)
yield db.disconnect()
@defer.inlineCallbacks
def test_ShardedConnection(self):
hosts = ["%s:%s" % (REDIS_HOST, REDIS_PORT)]
db = yield redis.ShardedConnection(hosts, reconnect=False)
try:
yield db.pipeline()
raise self.failureException("Expected sharding to disallow pipelining")
        except NotImplementedError as e:
self.assertTrue("not supported" in str(e).lower())
yield db.disconnect()
@defer.inlineCallbacks
def test_ShardedConnectionPool(self):
hosts = ["%s:%s" % (REDIS_HOST, REDIS_PORT)]
db = yield redis.ShardedConnectionPool(hosts, reconnect=False)
try:
yield db.pipeline()
raise self.failureException("Expected sharding to disallow pipelining")
        except NotImplementedError as e:
self.assertTrue("not supported" in str(e).lower())
yield db.disconnect()
| mit |
adedayo/intellij-community | python/lib/Lib/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py | 71 | 8207 | import os
import sys
import shutil
from optparse import make_option
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.management.base import CommandError, NoArgsCommand
from django.contrib.staticfiles import finders
class Command(NoArgsCommand):
"""
Command that allows to copy or symlink media files from different
locations to the settings.STATIC_ROOT.
"""
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive',
default=True, help="Do NOT prompt the user for input of any "
"kind."),
make_option('-i', '--ignore', action='append', default=[],
dest='ignore_patterns', metavar='PATTERN',
help="Ignore files or directories matching this glob-style "
"pattern. Use multiple times to ignore more."),
make_option('-n', '--dry-run', action='store_true', dest='dry_run',
default=False, help="Do everything except modify the filesystem."),
make_option('-l', '--link', action='store_true', dest='link',
default=False, help="Create a symbolic link to each file instead of copying."),
make_option('--no-default-ignore', action='store_false',
dest='use_default_ignore_patterns', default=True,
help="Don't ignore the common private glob-style patterns 'CVS', "
"'.*' and '*~'."),
)
help = "Collect static files from apps and other locations in a single location."
def handle_noargs(self, **options):
symlink = options['link']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~']
ignore_patterns = list(set(ignore_patterns))
self.copied_files = set()
self.symlinked_files = set()
self.unmodified_files = set()
self.destination_storage = get_storage_class(settings.STATICFILES_STORAGE)()
try:
self.destination_storage.path('')
except NotImplementedError:
self.destination_local = False
else:
self.destination_local = True
if symlink:
if sys.platform == 'win32':
raise CommandError("Symlinking is not supported by this "
"platform (%s)." % sys.platform)
if not self.destination_local:
raise CommandError("Can't symlink to a remote destination.")
# Warn before doing anything more.
if options.get('interactive'):
confirm = raw_input("""
You have requested to collate static files and collect them at the destination
location as specified in your settings file.
This will overwrite existing files.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """)
if confirm != 'yes':
raise CommandError("Static files build cancelled.")
# Use ints for file times (ticket #14665)
os.stat_float_times(False)
for finder in finders.get_finders():
for source, prefix, storage in finder.list(ignore_patterns):
self.copy_file(source, prefix, storage, **options)
verbosity = int(options.get('verbosity', 1))
actual_count = len(self.copied_files) + len(self.symlinked_files)
unmodified_count = len(self.unmodified_files)
if verbosity >= 1:
self.stdout.write("\n%s static file%s %s to '%s'%s.\n"
% (actual_count, actual_count != 1 and 's' or '',
symlink and 'symlinked' or 'copied',
settings.STATIC_ROOT,
unmodified_count and ' (%s unmodified)'
% unmodified_count or ''))
def copy_file(self, source, prefix, source_storage, **options):
"""
Attempt to copy (or symlink) ``source`` to ``destination``,
returning True if successful.
"""
source_path = source_storage.path(source)
try:
source_last_modified = source_storage.modified_time(source)
except (OSError, NotImplementedError):
source_last_modified = None
if prefix:
destination = '/'.join([prefix, source])
else:
destination = source
symlink = options['link']
dry_run = options['dry_run']
verbosity = int(options.get('verbosity', 1))
if destination in self.copied_files:
if verbosity >= 2:
self.stdout.write("Skipping '%s' (already copied earlier)\n"
% destination)
return False
if destination in self.symlinked_files:
if verbosity >= 2:
self.stdout.write("Skipping '%s' (already linked earlier)\n"
% destination)
return False
if self.destination_storage.exists(destination):
try:
destination_last_modified = \
self.destination_storage.modified_time(destination)
except (OSError, NotImplementedError):
# storage doesn't support ``modified_time`` or failed.
pass
else:
destination_is_link = os.path.islink(
self.destination_storage.path(destination))
if destination_last_modified >= source_last_modified:
if (not symlink and not destination_is_link):
if verbosity >= 2:
self.stdout.write("Skipping '%s' (not modified)\n"
% destination)
self.unmodified_files.add(destination)
return False
if dry_run:
if verbosity >= 2:
self.stdout.write("Pretending to delete '%s'\n"
% destination)
else:
if verbosity >= 2:
self.stdout.write("Deleting '%s'\n" % destination)
self.destination_storage.delete(destination)
if symlink:
destination_path = self.destination_storage.path(destination)
if dry_run:
if verbosity >= 1:
self.stdout.write("Pretending to symlink '%s' to '%s'\n"
% (source_path, destination_path))
else:
if verbosity >= 1:
self.stdout.write("Symlinking '%s' to '%s'\n"
% (source_path, destination_path))
try:
os.makedirs(os.path.dirname(destination_path))
except OSError:
pass
os.symlink(source_path, destination_path)
self.symlinked_files.add(destination)
else:
if dry_run:
if verbosity >= 1:
self.stdout.write("Pretending to copy '%s' to '%s'\n"
% (source_path, destination))
else:
if self.destination_local:
destination_path = self.destination_storage.path(destination)
try:
os.makedirs(os.path.dirname(destination_path))
except OSError:
pass
shutil.copy2(source_path, destination_path)
if verbosity >= 1:
self.stdout.write("Copying '%s' to '%s'\n"
% (source_path, destination_path))
else:
source_file = source_storage.open(source)
self.destination_storage.save(destination, source_file)
if verbosity >= 1:
self.stdout.write("Copying %s to %s\n"
% (source_path, destination))
self.copied_files.add(destination)
return True
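# Illustrative invocation (assumed typical usage; the flags are the ones
# declared in option_list above):
#   python manage.py collectstatic --noinput --link --ignore "*.less"
# which symlinks every discovered static file into settings.STATIC_ROOT,
# skipping files that match the extra ignore pattern.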
| apache-2.0 |
ganeshgore/myremolab | server/launch/sample_balanced2_concurrent_experiments/main_machine/lab_and_experiment/experiment16/server_config.py | 242 | 1525 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
weblab_xilinx_experiment_xilinx_device = 'FPGA'
weblab_xilinx_experiment_port_number = 1
# This should be something like this:
# import os as _os
# xilinx_home = _os.getenv('XILINX_HOME')
# if xilinx_home == None:
# if _os.name == 'nt':
# xilinx_home = r'C:\Program Files\Xilinx'
# elif _os.name == 'posix':
# xilinx_home = r"/home/nctrun/Xilinx"
#
# if _os.name == 'nt':
# xilinx_impact_full_path = [xilinx_home + r'\bin\nt\impact']
# elif _os.name == 'posix':
# xilinx_impact_full_path = [xilinx_home + r'/bin/lin/impact']
# But for testing we are going to fake it:
xilinx_home = "."
xilinx_impact_full_path = ["python","./tests/unit/weblab/experiment/devices/xilinx_impact/fake_impact.py" ]
xilinx_device_to_program = 'XilinxImpact' # 'JTagBlazer', 'DigilentAdept'
xilinx_device_to_send_commands = 'SerialPort' # 'HttpDevice'
digilent_adept_full_path = ["python","./test/unit/weblab/experiment/devices/digilent_adept/fake_digilent_adept.py" ]
digilent_adept_batch_content = """something with the variable $FILE"""
xilinx_http_device_ip_FPGA = "192.168.50.138"
xilinx_http_device_port_FPGA = 80
xilinx_http_device_app_FPGA = ""
xilinx_batch_content_FPGA = """setMode -bs
setCable -port auto
addDevice -position 1 -file $FILE
Program -p 1
exit
"""
# Though it is not really an FPGA, the webcam URL var name depends on the device,
# specified above.
fpga_webcam_url = '''https://www.weblab.deusto.es/webcam/fpga0/image.jpg'''
| bsd-2-clause |
stshine/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/trie/py.py | 817 | 1763 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
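# Illustrative usage (assumed; keys must be text_type per __init__ above):
#   t = Trie({u"ab": 1, u"abc": 2, u"b": 3})
#   t.keys(u"ab")                 -> set([u"ab", u"abc"])
#   t.has_keys_with_prefix(u"a")  -> True
# The (_cachestr, _cachepoints) pair memoizes the last bisect window, so a
# lookup whose prefix extends the previous one only searches that sub-range.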
| mpl-2.0 |
rhertzog/django | django/contrib/admin/models.py | 72 | 5618 | from __future__ import unicode_literals
import json
from django.conf import settings
from django.contrib.admin.utils import quote
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.urls import NoReverseMatch, reverse
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.text import get_text_list
from django.utils.translation import ugettext, ugettext_lazy as _
ADDITION = 1
CHANGE = 2
DELETION = 3
class LogEntryManager(models.Manager):
use_in_migrations = True
def log_action(self, user_id, content_type_id, object_id, object_repr, action_flag, change_message=''):
if isinstance(change_message, list):
change_message = json.dumps(change_message)
self.model.objects.create(
user_id=user_id,
content_type_id=content_type_id,
object_id=smart_text(object_id),
object_repr=object_repr[:200],
action_flag=action_flag,
change_message=change_message,
)
@python_2_unicode_compatible
class LogEntry(models.Model):
action_time = models.DateTimeField(
_('action time'),
default=timezone.now,
editable=False,
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
models.CASCADE,
verbose_name=_('user'),
)
content_type = models.ForeignKey(
ContentType,
models.SET_NULL,
verbose_name=_('content type'),
blank=True, null=True,
)
object_id = models.TextField(_('object id'), blank=True, null=True)
# Translators: 'repr' means representation (https://docs.python.org/3/library/functions.html#repr)
object_repr = models.CharField(_('object repr'), max_length=200)
action_flag = models.PositiveSmallIntegerField(_('action flag'))
# change_message is either a string or a JSON structure
change_message = models.TextField(_('change message'), blank=True)
objects = LogEntryManager()
class Meta:
verbose_name = _('log entry')
verbose_name_plural = _('log entries')
db_table = 'django_admin_log'
ordering = ('-action_time',)
def __repr__(self):
return smart_text(self.action_time)
def __str__(self):
if self.is_addition():
return ugettext('Added "%(object)s".') % {'object': self.object_repr}
elif self.is_change():
return ugettext('Changed "%(object)s" - %(changes)s') % {
'object': self.object_repr,
'changes': self.get_change_message(),
}
elif self.is_deletion():
return ugettext('Deleted "%(object)s."') % {'object': self.object_repr}
return ugettext('LogEntry Object')
def is_addition(self):
return self.action_flag == ADDITION
def is_change(self):
return self.action_flag == CHANGE
def is_deletion(self):
return self.action_flag == DELETION
def get_change_message(self):
"""
If self.change_message is a JSON structure, interpret it as a change
string, properly translated.
"""
if self.change_message and self.change_message[0] == '[':
try:
change_message = json.loads(self.change_message)
except ValueError:
return self.change_message
messages = []
for sub_message in change_message:
if 'added' in sub_message:
if sub_message['added']:
sub_message['added']['name'] = ugettext(sub_message['added']['name'])
messages.append(ugettext('Added {name} "{object}".').format(**sub_message['added']))
else:
messages.append(ugettext('Added.'))
elif 'changed' in sub_message:
sub_message['changed']['fields'] = get_text_list(
sub_message['changed']['fields'], ugettext('and')
)
if 'name' in sub_message['changed']:
sub_message['changed']['name'] = ugettext(sub_message['changed']['name'])
messages.append(ugettext('Changed {fields} for {name} "{object}".').format(
**sub_message['changed']
))
else:
messages.append(ugettext('Changed {fields}.').format(**sub_message['changed']))
elif 'deleted' in sub_message:
sub_message['deleted']['name'] = ugettext(sub_message['deleted']['name'])
messages.append(ugettext('Deleted {name} "{object}".').format(**sub_message['deleted']))
change_message = ' '.join(msg[0].upper() + msg[1:] for msg in messages)
return change_message or ugettext('No fields changed.')
else:
return self.change_message
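        # Illustrative (assumed) JSON payload handled by the branch above:
        #   '[{"added": {}}, {"changed": {"fields": ["first_name"]}}]'
        # renders as 'Added. Changed first_name.' -- each sub-message becomes
        # one sentence and the final join capitalizes each sentence.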
def get_edited_object(self):
"Returns the edited object represented by this log entry"
return self.content_type.get_object_for_this_type(pk=self.object_id)
def get_admin_url(self):
"""
Returns the admin URL to edit the object represented by this log entry.
"""
if self.content_type and self.object_id:
url_name = 'admin:%s_%s_change' % (self.content_type.app_label, self.content_type.model)
try:
return reverse(url_name, args=(quote(self.object_id),))
except NoReverseMatch:
pass
return None
| bsd-3-clause |
silenceli/nova | nova/objects/network.py | 6 | 10160 | # Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.config import cfg
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova import utils
network_opts = [
cfg.BoolOpt('share_dhcp_address',
default=False,
help='DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE '
'NETWORK. If True in multi_host mode, all compute hosts '
'share the same dhcp address. The same IP address used '
'for DHCP will be added on each nova-network node which '
'is only visible to the vms on the same host.'),
cfg.IntOpt('network_device_mtu',
help='DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE '
'NETWORK. MTU setting for network interface.'),
]
CONF = cfg.CONF
CONF.register_opts(network_opts)
# TODO(berrange): Remove NovaObjectDictCompat
class Network(obj_base.NovaPersistentObject, obj_base.NovaObject,
obj_base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Added in_use_on_host()
# Version 1.2: Added mtu, dhcp_server, enable_dhcp, share_address
VERSION = '1.2'
fields = {
'id': fields.IntegerField(),
'label': fields.StringField(),
'injected': fields.BooleanField(),
'cidr': fields.IPV4NetworkField(nullable=True),
'cidr_v6': fields.IPV6NetworkField(nullable=True),
'multi_host': fields.BooleanField(),
'netmask': fields.IPV4AddressField(nullable=True),
'gateway': fields.IPV4AddressField(nullable=True),
'broadcast': fields.IPV4AddressField(nullable=True),
'netmask_v6': fields.IPV6AddressField(nullable=True),
'gateway_v6': fields.IPV6AddressField(nullable=True),
'bridge': fields.StringField(nullable=True),
'bridge_interface': fields.StringField(nullable=True),
'dns1': fields.IPAddressField(nullable=True),
'dns2': fields.IPAddressField(nullable=True),
'vlan': fields.IntegerField(nullable=True),
'vpn_public_address': fields.IPAddressField(nullable=True),
'vpn_public_port': fields.IntegerField(nullable=True),
'vpn_private_address': fields.IPAddressField(nullable=True),
'dhcp_start': fields.IPV4AddressField(nullable=True),
'rxtx_base': fields.IntegerField(nullable=True),
'project_id': fields.UUIDField(nullable=True),
'priority': fields.IntegerField(nullable=True),
'host': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'mtu': fields.IntegerField(nullable=True),
'dhcp_server': fields.IPAddressField(nullable=True),
'enable_dhcp': fields.BooleanField(),
'share_address': fields.BooleanField(),
}
@staticmethod
def _convert_legacy_ipv6_netmask(netmask):
"""Handle netmask_v6 possibilities from the database.
Historically, this was stored as just an integral CIDR prefix,
but in the future it should be stored as an actual netmask.
Be tolerant of either here.
"""
try:
prefix = int(netmask)
return netaddr.IPNetwork('1::/%i' % prefix).netmask
except ValueError:
pass
try:
return netaddr.IPNetwork(netmask).netmask
except netaddr.AddrFormatError:
raise ValueError(_('IPv6 netmask "%s" must be a netmask '
'or integral prefix') % netmask)
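        # Illustrative (assumed): a legacy row storing just '64' yields
        # netaddr.IPNetwork('1::/64').netmask, i.e.
        # IPAddress('ffff:ffff:ffff:ffff::'); values that are neither integral
        # prefixes nor parseable by netaddr raise the ValueError above.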
def obj_make_compatible(self, primitive, target_version):
target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 2):
if 'mtu' in primitive:
del primitive['mtu']
if 'enable_dhcp' in primitive:
del primitive['enable_dhcp']
if 'dhcp_server' in primitive:
del primitive['dhcp_server']
if 'share_address' in primitive:
del primitive['share_address']
@staticmethod
def _from_db_object(context, network, db_network):
for field in network.fields:
db_value = db_network[field]
            if field == 'netmask_v6' and db_value is not None:
                db_value = network._convert_legacy_ipv6_netmask(db_value)
            if field == 'mtu' and db_value is None:
                db_value = CONF.network_device_mtu
            if field == 'dhcp_server' and db_value is None:
                db_value = db_network['gateway']
            if field == 'share_address' and CONF.share_dhcp_address:
db_value = CONF.share_dhcp_address
network[field] = db_value
network._context = context
network.obj_reset_changes()
return network
@obj_base.remotable_classmethod
def get_by_id(cls, context, network_id, project_only='allow_none'):
db_network = db.network_get(context, network_id,
project_only=project_only)
return cls._from_db_object(context, cls(), db_network)
@obj_base.remotable_classmethod
def get_by_uuid(cls, context, network_uuid):
db_network = db.network_get_by_uuid(context, network_uuid)
return cls._from_db_object(context, cls(), db_network)
@obj_base.remotable_classmethod
def get_by_cidr(cls, context, cidr):
db_network = db.network_get_by_cidr(context, cidr)
return cls._from_db_object(context, cls(), db_network)
@obj_base.remotable_classmethod
def associate(cls, context, project_id, network_id=None, force=False):
db.network_associate(context, project_id, network_id=network_id,
force=force)
@obj_base.remotable_classmethod
def disassociate(cls, context, network_id, host=False, project=False):
db.network_disassociate(context, network_id, host, project)
@obj_base.remotable_classmethod
def in_use_on_host(cls, context, network_id, host):
return db.network_in_use_on_host(context, network_id, host)
def _get_primitive_changes(self):
changes = {}
for key, value in self.obj_get_changes().items():
if isinstance(value, netaddr.IPAddress):
changes[key] = str(value)
else:
changes[key] = value
return changes
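    # e.g. (assumed values): {'gateway': netaddr.IPAddress('10.0.0.1')} becomes
    # {'gateway': '10.0.0.1'}, since the db layer below expects plain strings
    # rather than netaddr objects.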
@obj_base.remotable
def create(self, context):
updates = self._get_primitive_changes()
if 'id' in updates:
raise exception.ObjectActionError(action='create',
reason='already created')
db_network = db.network_create_safe(context, updates)
self._from_db_object(context, self, db_network)
@obj_base.remotable
def destroy(self, context):
db.network_delete_safe(context, self.id)
self.deleted = True
self.obj_reset_changes(['deleted'])
@obj_base.remotable
def save(self, context):
updates = self._get_primitive_changes()
if 'netmask_v6' in updates:
# NOTE(danms): For some reason, historical code stores the
# IPv6 netmask as just the CIDR mask length, so convert that
# back here before saving for now.
updates['netmask_v6'] = netaddr.IPNetwork(
updates['netmask_v6']).netmask
set_host = 'host' in updates
if set_host:
db.network_set_host(context, self.id, updates.pop('host'))
if updates:
db_network = db.network_update(context, self.id, updates)
elif set_host:
db_network = db.network_get(context, self.id)
else:
db_network = None
if db_network is not None:
self._from_db_object(context, self, db_network)
class NetworkList(obj_base.ObjectListBase, obj_base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added get_by_project()
# Version 1.2: Network <= version 1.2
VERSION = '1.2'
fields = {
'objects': fields.ListOfObjectsField('Network'),
}
child_versions = {
'1.0': '1.0',
'1.1': '1.1',
'1.2': '1.2',
}
@obj_base.remotable_classmethod
def get_all(cls, context, project_only='allow_none'):
db_networks = db.network_get_all(context, project_only)
return obj_base.obj_make_list(context, cls(context), objects.Network,
db_networks)
@obj_base.remotable_classmethod
def get_by_uuids(cls, context, network_uuids, project_only='allow_none'):
db_networks = db.network_get_all_by_uuids(context, network_uuids,
project_only)
return obj_base.obj_make_list(context, cls(context), objects.Network,
db_networks)
@obj_base.remotable_classmethod
def get_by_host(cls, context, host):
db_networks = db.network_get_all_by_host(context, host)
return obj_base.obj_make_list(context, cls(context), objects.Network,
db_networks)
@obj_base.remotable_classmethod
def get_by_project(cls, context, project_id, associate=True):
db_networks = db.project_get_networks(context, project_id,
associate=associate)
return obj_base.obj_make_list(context, cls(context), objects.Network,
db_networks)
| apache-2.0 |
paulsmith/geodjango | django/contrib/localflavor/jp/jp_prefectures.py | 543 | 2089 | from django.utils.translation import ugettext_lazy
JP_PREFECTURES = (
('hokkaido', ugettext_lazy('Hokkaido'),),
('aomori', ugettext_lazy('Aomori'),),
('iwate', ugettext_lazy('Iwate'),),
('miyagi', ugettext_lazy('Miyagi'),),
('akita', ugettext_lazy('Akita'),),
('yamagata', ugettext_lazy('Yamagata'),),
('fukushima', ugettext_lazy('Fukushima'),),
('ibaraki', ugettext_lazy('Ibaraki'),),
('tochigi', ugettext_lazy('Tochigi'),),
('gunma', ugettext_lazy('Gunma'),),
('saitama', ugettext_lazy('Saitama'),),
('chiba', ugettext_lazy('Chiba'),),
('tokyo', ugettext_lazy('Tokyo'),),
('kanagawa', ugettext_lazy('Kanagawa'),),
('yamanashi', ugettext_lazy('Yamanashi'),),
('nagano', ugettext_lazy('Nagano'),),
('niigata', ugettext_lazy('Niigata'),),
('toyama', ugettext_lazy('Toyama'),),
('ishikawa', ugettext_lazy('Ishikawa'),),
('fukui', ugettext_lazy('Fukui'),),
('gifu', ugettext_lazy('Gifu'),),
('shizuoka', ugettext_lazy('Shizuoka'),),
('aichi', ugettext_lazy('Aichi'),),
('mie', ugettext_lazy('Mie'),),
('shiga', ugettext_lazy('Shiga'),),
('kyoto', ugettext_lazy('Kyoto'),),
('osaka', ugettext_lazy('Osaka'),),
('hyogo', ugettext_lazy('Hyogo'),),
('nara', ugettext_lazy('Nara'),),
('wakayama', ugettext_lazy('Wakayama'),),
('tottori', ugettext_lazy('Tottori'),),
('shimane', ugettext_lazy('Shimane'),),
('okayama', ugettext_lazy('Okayama'),),
('hiroshima', ugettext_lazy('Hiroshima'),),
('yamaguchi', ugettext_lazy('Yamaguchi'),),
('tokushima', ugettext_lazy('Tokushima'),),
('kagawa', ugettext_lazy('Kagawa'),),
('ehime', ugettext_lazy('Ehime'),),
('kochi', ugettext_lazy('Kochi'),),
('fukuoka', ugettext_lazy('Fukuoka'),),
('saga', ugettext_lazy('Saga'),),
('nagasaki', ugettext_lazy('Nagasaki'),),
('kumamoto', ugettext_lazy('Kumamoto'),),
('oita', ugettext_lazy('Oita'),),
('miyazaki', ugettext_lazy('Miyazaki'),),
('kagoshima', ugettext_lazy('Kagoshima'),),
('okinawa', ugettext_lazy('Okinawa'),),
)
| bsd-3-clause |
MCGarvey/django-calaccess-raw-data | calaccess_raw/models/campaign.py | 15 | 138941 | from __future__ import unicode_literals
from calaccess_raw import fields
from django.utils.encoding import python_2_unicode_compatible
from .base import CalAccessBaseModel
@python_2_unicode_compatible
class CvrSoCd(CalAccessBaseModel):
"""
Cover page for a statement of organization creation or termination
form filed by a slate-mailer organization or recipient committee.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE",
)
acct_opendt = fields.DateTimeField(
db_column="ACCT_OPENDT",
null=True,
help_text='This field is undocumented',
)
ACTIVITY_LEVEL_CHOICES = (
("CI", "City"),
("CO", "County"),
("ST", "State"),
("", "Unknown"),
)
actvty_lvl = fields.CharField(
max_length=2,
db_column="ACTVTY_LVL",
blank=True,
choices=ACTIVITY_LEVEL_CHOICES,
verbose_name="Activity level",
help_text="Organization's level of activity"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
bank_adr1 = fields.CharField(
max_length=55,
db_column="BANK_ADR1",
blank=True,
help_text='This field is undocumented',
)
bank_adr2 = fields.CharField(
max_length=55,
db_column="BANK_ADR2",
blank=True,
help_text='This field is undocumented',
)
bank_city = fields.CharField(
max_length=30,
db_column="BANK_CITY",
blank=True,
help_text='This field is undocumented',
)
bank_nam = fields.CharField(
max_length=200,
db_column="BANK_NAM",
blank=True,
help_text='This field is undocumented',
)
bank_phon = fields.CharField(
max_length=20,
db_column="BANK_PHON",
blank=True,
help_text='This field is undocumented',
)
bank_st = fields.CharField(
max_length=2,
db_column="BANK_ST",
blank=True,
help_text='This field is undocumented',
)
bank_zip4 = fields.CharField(
max_length=10,
db_column="BANK_ZIP4",
blank=True,
help_text='This field is undocumented',
)
brdbase_cb = fields.CharField(
max_length=1,
db_column="BRDBASE_CB",
blank=True,
help_text='This field is undocumented',
)
city = fields.CharField(
max_length=30,
db_column="CITY",
blank=True,
help_text='This field is undocumented',
)
cmte_email = fields.CharField(
max_length=60,
db_column="CMTE_EMAIL",
blank=True,
help_text='This field is undocumented',
)
cmte_fax = fields.CharField(
max_length=20,
db_column="CMTE_FAX",
blank=True,
help_text='This field is undocumented',
)
com82013id = fields.CharField(
max_length=9,
db_column="COM82013ID",
blank=True,
help_text='This field is undocumented',
)
com82013nm = fields.CharField(
max_length=200,
db_column="COM82013NM",
blank=True,
help_text='This field is undocumented',
)
com82013yn = fields.CharField(
max_length=1,
db_column="COM82013YN",
blank=True,
help_text='This field is undocumented',
)
control_cb = fields.CharField(
max_length=1,
db_column="CONTROL_CB",
blank=True,
help_text='This field is undocumented',
)
county_act = fields.CharField(
max_length=20,
db_column="COUNTY_ACT",
blank=True,
help_text='This field is undocumented',
)
county_res = fields.CharField(
max_length=20,
db_column="COUNTY_RES",
blank=True,
help_text='This field is undocumented',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('BMC', 'Ballot measure committee'),
('CAO', 'Candidate/officeholder'),
('COM', 'Committee'),
('CTL', 'Controlled committee'),
('RCP', 'Recipient committee'),
('SMO', 'Slate-mailer organization'),
)
entity_cd = fields.CharField(
max_length=3,
db_column="ENTITY_CD",
blank=True,
choices=ENTITY_CODE_CHOICES,
verbose_name="Entity code"
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=9,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
filer_namf = fields.CharField(
max_length=45,
db_column="FILER_NAMF",
blank=True,
verbose_name="Filer first name"
)
filer_naml = fields.CharField(
max_length=200,
db_column="FILER_NAML",
blank=True,
verbose_name="Filer last name"
)
filer_nams = fields.CharField(
max_length=10,
db_column="FILER_NAMS",
blank=True,
verbose_name="Filer name suffix"
)
filer_namt = fields.CharField(
max_length=10,
db_column="FILER_NAMT",
blank=True,
verbose_name="Filer name title"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('F400', 'Form 400 (Statement of organization, \
slate mailer organization)'),
('F402', 'Form 402 (Statement of termination, \
        slate mailer organization)'),
('F410', 'Form 410 (Statement of organization, recipient committee)'),
)
form_type = fields.CharField(
max_length=4,
db_column="FORM_TYPE",
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
genpurp_cb = fields.CharField(
max_length=1,
db_column="GENPURP_CB",
blank=True,
help_text='This field is undocumented',
)
gpc_descr = fields.CharField(
max_length=300,
db_column="GPC_DESCR",
blank=True,
help_text='This field is undocumented',
)
mail_city = fields.CharField(
max_length=30,
db_column="MAIL_CITY",
blank=True,
help_text='This field is undocumented',
)
mail_st = fields.CharField(
max_length=2,
db_column="MAIL_ST",
blank=True,
help_text='This field is undocumented',
)
mail_zip4 = fields.CharField(
max_length=10,
db_column="MAIL_ZIP4",
blank=True,
help_text='This field is undocumented',
)
phone = fields.CharField(
max_length=20,
db_column="PHONE",
blank=True,
help_text='This field is undocumented',
)
primfc_cb = fields.CharField(
max_length=1,
db_column="PRIMFC_CB",
blank=True,
help_text='This field is undocumented',
)
qualfy_dt = fields.DateTimeField(
db_column="QUALFY_DT",
null=True,
verbose_name="Date qualified",
help_text="Date qualified as an organization"
)
qual_cb = fields.CharField(
max_length=1,
db_column="QUAL_CB",
blank=True,
help_text='This field is undocumented',
)
REC_TYPE_CHOICES = (
("CVR", "CVR"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
report_num = fields.CharField(
max_length=3,
db_column="REPORT_NUM",
blank=True,
help_text='This field is undocumented',
)
rpt_date = fields.DateTimeField(
db_column="RPT_DATE",
null=True,
help_text='This field is undocumented',
)
smcont_qualdt = fields.DateTimeField(
db_column="SMCONT_QUALDT",
null=True,
help_text='This field is undocumented',
)
sponsor_cb = fields.CharField(
max_length=1,
db_column="SPONSOR_CB",
blank=True,
help_text='This field is undocumented',
)
st = fields.CharField(
max_length=2,
db_column="ST",
blank=True,
help_text='This field is undocumented',
)
surplusdsp = fields.CharField(
max_length=90,
db_column="SURPLUSDSP",
blank=True,
help_text='This field is undocumented',
)
term_date = fields.DateTimeField(
db_column="TERM_DATE",
null=True,
help_text='This field is undocumented',
)
tres_city = fields.CharField(
max_length=30,
db_column="TRES_CITY",
blank=True,
verbose_name="Treasurer's city"
)
tres_namf = fields.CharField(
max_length=45,
db_column="TRES_NAMF",
blank=True,
verbose_name="Treasurer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column="TRES_NAML",
blank=True,
verbose_name="Treasurer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column="TRES_NAMS",
blank=True,
verbose_name="Treasurer's name suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column="TRES_NAMT",
blank=True,
verbose_name="Treasurer's name title"
)
tres_phon = fields.CharField(
max_length=20,
db_column="TRES_PHON",
blank=True,
verbose_name="Treasurer's phone number"
)
tres_st = fields.CharField(
max_length=2,
db_column="TRES_ST",
blank=True,
verbose_name="Treasurer's street",
)
tres_zip4 = fields.CharField(
max_length=10,
db_column="TRES_ZIP4",
blank=True,
help_text="Treasurer's ZIP Code"
)
zip4 = fields.CharField(
max_length=10,
db_column="ZIP4",
blank=True,
help_text='This field is undocumented',
)
class Meta:
app_label = 'calaccess_raw'
db_table = "CVR_SO_CD"
verbose_name = 'CVR_SO_CD'
verbose_name_plural = 'CVR_SO_CD'
def __str__(self):
return str(self.filing_id)
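# Illustrative query (assumed standard Django ORM usage, not part of the
# model definition): CvrSoCd.objects.filter(form_type='F410') would select
# the recipient-committee statements of organization.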
@python_2_unicode_compatible
class Cvr2SoCd(CalAccessBaseModel):
"""
Additional names and committees information included on the second page
of a statement of organization creation form filed
by a slate-mailer organization or recipient committee.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("CVR2", "CVR2"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F400', 'Form 400 (Statement of organization, \
slate mailer organization)'),
('F410', 'Form 410 (Statement of organization, recipient committee)'),
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
db_column='FORM_TYPE',
max_length=4,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('ATH', 'Authorizing individual'),
('ATR', 'Assistant treasurer'),
('BMN', 'BMN (Unknown)'),
('BNM', 'Ballot measure\'s name/title'),
('CAO', 'Candidate/officeholder'),
('COM', 'Committee'),
('CTL', 'Controlled committee'),
('OFF', 'Officer'),
('POF', 'Principal officer'),
('PRO', 'Proponent'),
('SPO', 'Sponsor'),
)
entity_cd = fields.CharField(
db_column='ENTITY_CD',
max_length=3,
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
enty_naml = fields.CharField(
db_column='ENTY_NAML',
max_length=194,
blank=True,
help_text="Entity's business name or last name if the entity is an \
individual"
)
enty_namf = fields.CharField(
db_column='ENTY_NAMF',
max_length=34,
blank=True,
help_text="Entity's first name if the entity is an individual"
)
enty_namt = fields.CharField(
db_column='ENTY_NAMT',
max_length=9,
blank=True,
help_text="Entity's name prefix or title if the entity is an \
individual"
)
enty_nams = fields.CharField(
db_column='ENTY_NAMS',
max_length=10,
blank=True,
help_text="Entity's name suffix if the entity is an individual"
)
item_cd = fields.CharField(
db_column='ITEM_CD',
max_length=4,
blank=True,
help_text="Section of the Statement of Organization this \
itemization relates to. See CAL document for the definition \
of legal values for this column."
)
mail_city = fields.CharField(
db_column='MAIL_CITY',
max_length=25,
blank=True,
help_text="City portion of the entity's mailing address"
)
mail_st = fields.CharField(
db_column='MAIL_ST',
max_length=4,
blank=True,
help_text="State portion of the entity's mailing address"
)
mail_zip4 = fields.CharField(
db_column='MAIL_ZIP4',
max_length=10,
blank=True,
help_text="Zipcode portion of the entity's mailing address"
)
day_phone = fields.CharField(
db_column='DAY_PHONE',
max_length=20,
blank=True,
help_text="Entity's daytime phone number"
)
fax_phone = fields.CharField(
db_column='FAX_PHONE',
max_length=20,
blank=True,
help_text="Entity's fax number"
)
email_adr = fields.CharField(
db_column='EMAIL_ADR',
max_length=40,
blank=True,
help_text="Email address. Not contained in current forms."
)
cmte_id = fields.IntegerField(
db_column='CMTE_ID',
blank=True,
null=True,
verbose_name="Committee ID",
help_text="Entity's identification number"
)
ind_group = fields.CharField(
db_column='IND_GROUP',
max_length=87,
blank=True,
help_text="Industry group/affiliation description"
)
office_cd = fields.CharField(
db_column='OFFICE_CD',
max_length=4,
blank=True,
help_text="Code that identifies the office being sought. See \
CAL document for a list of valid codes."
)
offic_dscr = fields.CharField(
db_column='OFFIC_DSCR',
max_length=40,
blank=True,
help_text="Office sought description used if the office sought code \
(OFFICE_CD) equals other (OTH)."
)
juris_cd = fields.CharField(
db_column='JURIS_CD',
max_length=4,
blank=True,
help_text="Office jurisdiction code. See CAL document for a \
list of legal values."
)
juris_dscr = fields.CharField(
db_column='JURIS_DSCR',
max_length=40,
blank=True,
help_text="Office jurisdiction description provided if the \
jurisdiction code (JURIS_CD) equals other (OTH)."
)
dist_no = fields.CharField(
db_column='DIST_NO',
max_length=4,
blank=True,
help_text="Office district number for Senate, Assembly, and Board \
of Equalization districts."
)
off_s_h_cd = fields.CharField(
db_column='OFF_S_H_CD',
max_length=4,
blank=True,
help_text="Office sought/held code. Legal values are 'S' for sought \
and 'H' for held."
)
non_pty_cb = fields.CharField(
db_column='NON_PTY_CB',
max_length=4,
blank=True,
help_text="Non-partisan check-box. Legal values are 'X' and null."
)
party_name = fields.CharField(
db_column='PARTY_NAME',
max_length=63,
blank=True,
help_text="Name of party (if partisan)"
)
bal_num = fields.CharField(
db_column='BAL_NUM',
max_length=7,
blank=True,
help_text="Ballot measure number or letter"
)
bal_juris = fields.CharField(
db_column='BAL_JURIS',
max_length=40,
blank=True,
help_text="Jurisdiction of ballot measure"
)
sup_opp_cd = fields.CharField(
db_column='SUP_OPP_CD',
max_length=4,
blank=True,
help_text="Support/oppose code (S/O). Legal values are 'S' for \
support and 'O' for oppose."
)
year_elect = fields.CharField(
db_column='YEAR_ELECT',
max_length=4,
blank=True,
help_text="Year of election"
)
pof_title = fields.CharField(
db_column='POF_TITLE',
max_length=44,
blank=True,
help_text="Position/title of the principal officer"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'CVR2_SO_CD'
verbose_name = 'CVR2_SO_CD'
verbose_name_plural = 'CVR2_SO_CD'
def __str__(self):
return str(self.filing_id)
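# A minimal query sketch for the model above (assumes a configured Django
# project with this app installed and the raw data loaded; the filing ID
# below is hypothetical):
#
#   from calaccess_raw.models import Cvr2SoCd
#
#   # Pull every page-2 Statement of Organization record for one filing.
#   for row in Cvr2SoCd.objects.filter(filing_id=1234567):
#       print(row.item_cd, row.cmte_id, row.party_name)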
@python_2_unicode_compatible
class CvrCampaignDisclosureCd(CalAccessBaseModel):
"""
Cover page information from campaign disclosure forms
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amendexp_1 = fields.CharField(
max_length=100,
db_column='AMENDEXP_1',
blank=True,
help_text='Amendment explanation line 1'
)
amendexp_2 = fields.CharField(
max_length=100,
db_column='AMENDEXP_2',
blank=True,
help_text="Amendment explanation line 2"
)
amendexp_3 = fields.CharField(
max_length=100,
db_column='AMENDEXP_3',
blank=True,
help_text="Amendment explanation line 3"
)
assoc_cb = fields.CharField(
max_length=4,
db_column='ASSOC_CB',
blank=True,
help_text="Association Interests info included check-box. Legal \
values are 'X' and null."
)
assoc_int = fields.CharField(
max_length=90,
db_column='ASSOC_INT',
blank=True,
help_text="Description of association interests"
)
bal_id = fields.CharField(
max_length=9,
db_column='BAL_ID',
blank=True,
help_text="This field is undocumented"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=4,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter"
)
brdbase_yn = fields.CharField(
max_length=1,
db_column='BRDBASE_YN',
blank=True,
help_text="Broad Base Committee (yes/no) check box. Legal \
values are 'Y' or 'N'."
)
# bus_adr1 = fields.CharField(
# max_length=55, db_column='BUS_ADR1', blank=True
# )
# bus_adr2 = fields.CharField(
# max_length=55, db_column='BUS_ADR2', blank=True
# )
bus_city = fields.CharField(
max_length=30,
db_column='BUS_CITY',
blank=True,
help_text="Employer/business address city"
)
bus_inter = fields.CharField(
max_length=40,
db_column='BUS_INTER',
blank=True,
help_text="Employer/business interest description"
)
bus_name = fields.CharField(
max_length=200,
db_column='BUS_NAME',
blank=True,
help_text="Name of employer/business. Applies to the form 461."
)
bus_st = fields.CharField(
max_length=2,
db_column='BUS_ST',
blank=True,
help_text="Employer/business address state"
)
bus_zip4 = fields.CharField(
max_length=10,
db_column='BUS_ZIP4',
blank=True,
help_text="Employer/business address ZIP Code"
)
busact_cb = fields.CharField(
max_length=10,
db_column='BUSACT_CB',
blank=True,
help_text="Business activity info included check-box. Valid values \
are 'X' and null"
)
busactvity = fields.CharField(
max_length=90,
db_column='BUSACTVITY',
blank=True,
help_text="Business activity description"
)
# cand_adr1 = fields.CharField(
# max_length=55, db_column='CAND_ADR1', blank=True
# )
# cand_adr2 = fields.CharField(
# max_length=55, db_column='CAND_ADR2', blank=True
# )
cand_city = fields.CharField(
max_length=30,
db_column='CAND_CITY',
blank=True,
help_text='Candidate/officeholder city'
)
cand_email = fields.CharField(
max_length=60,
db_column='CAND_EMAIL',
blank=True,
help_text='Candidate/officeholder email. This field \
is not contained on the forms.'
)
cand_fax = fields.CharField(
max_length=20,
db_column='CAND_FAX',
blank=True,
help_text='Candidate/officeholder fax. This field \
is not contained on the forms.'
)
cand_id = fields.CharField(
max_length=9,
db_column='CAND_ID',
blank=True,
help_text="This field is not documented"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text='Candidate/officeholder first name'
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder's last name. Applies to forms \
460, 465, and 496."
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder's name suffix"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder's prefix or title"
)
cand_phon = fields.CharField(
max_length=20,
db_column='CAND_PHON',
blank=True,
help_text='Candidate/officeholder phone'
)
cand_st = fields.CharField(
max_length=4,
db_column='CAND_ST',
blank=True,
help_text="Candidate/officeholder's state"
)
cand_zip4 = fields.CharField(
max_length=10,
db_column='CAND_ZIP4',
blank=True,
help_text="Candidate/officeholder's ZIP Code"
)
cmtte_id = fields.CharField(
max_length=9,
db_column='CMTTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee ID (Filer_id) of recipient Committee who's \
campaign statement is attached. This field applies to the form 401."
)
cmtte_type = fields.CharField(
max_length=1,
db_column='CMTTE_TYPE',
blank=True,
verbose_name="Committee type",
help_text="Type of Recipient Committee. Applies to the 450/460."
)
control_yn = fields.IntegerField(
null=True,
db_column='CONTROL_YN',
blank=True,
help_text="Controlled Committee (yes/no) check box. Legal values \
are 'Y' or 'N'."
)
dist_no = fields.CharField(
max_length=4,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
elect_date = fields.DateTimeField(
null=True,
db_column='ELECT_DATE',
blank=True,
help_text="Date of the General Election"
)
emplbus_cb = fields.CharField(
max_length=4,
db_column='EMPLBUS_CB',
blank=True,
help_text="Employer/Business Info included check-box. Legal \
values are 'X' or null. Applies to the Form 461."
)
employer = fields.CharField(
max_length=200,
db_column='EMPLOYER',
blank=True,
help_text="Employer. This field is most likely unused."
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('BMC', 'Ballot measure committee'),
('CAO', 'Candidate/officeholder'),
('COM', 'Committee'),
('CTL', 'Controlled committee'),
('IND', 'Person (Spending > $5,000)'),
('MDI', 'Major donor/independent expenditure'),
('OTH', 'Other'),
('PTY', 'Political party'),
('RCP', 'Recipient committee'),
('SCC', 'Small contributor committee'),
('SMO', 'Slate mailer organization'),
)
entity_cd = fields.CharField(
max_length=4,
db_column='ENTITY_CD',
blank=True,
choices=ENTITY_CODE_CHOICES,
verbose_name='entity code'
)
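# Because ENTITY_CODE_CHOICES is wired into the field's `choices`, Django
# generates a `get_entity_cd_display()` method automatically, e.g.:
#
#   record.get_entity_cd_display()  # 'Recipient committee' for 'RCP'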
file_email = fields.CharField(
max_length=60,
db_column='FILE_EMAIL',
blank=True,
help_text="Filer's email address"
)
# filer_adr1 = fields.CharField(
# max_length=55, db_column='FILER_ADR1', blank=True
# )
# filer_adr2 = fields.CharField(
# max_length=55, db_column='FILER_ADR2', blank=True
# )
filer_city = fields.CharField(
max_length=30,
db_column='FILER_CITY',
blank=True,
help_text="Filer's city"
)
filer_fax = fields.CharField(
max_length=20,
db_column='FILER_FAX',
blank=True,
help_text="Filer's fax"
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=15,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
filer_namf = fields.CharField(
max_length=45,
db_column='FILER_NAMF',
blank=True,
help_text="Filer's first name, if an individual"
)
filer_naml = fields.CharField(
max_length=200,
db_column='FILER_NAML',
help_text="The committee's or organization's name or if an \
individual the filer's last name."
)
filer_nams = fields.CharField(
max_length=10,
db_column='FILER_NAMS',
blank=True,
help_text="Filer's suffix, if an individual"
)
filer_namt = fields.CharField(
max_length=10,
db_column='FILER_NAMT',
blank=True,
help_text="Filer's title or prefix, if an individual"
)
filer_phon = fields.CharField(
max_length=20,
db_column='FILER_PHON',
blank=True,
help_text="Filer phone number"
)
filer_st = fields.CharField(
max_length=4,
db_column='FILER_ST',
blank=True,
help_text="Filer state"
)
filer_zip4 = fields.CharField(
max_length=10,
db_column='FILER_ZIP4',
blank=True,
help_text="Filer ZIP Code"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('F511', 'Form 511 (Paid spokesman report)'),
('F900', 'Form 900 (Public employee\'s retirement board, \
candidate campaign statement)'),
('F425', 'Form 425 (Semi-annual statement of no activity, \
non-controlled recipient committee)'),
('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
('F401', 'Form 401 (Slate mailer organization campaign statement)'),
('F498', 'Form 498 (Late payment report, slate mailer organizations)'),
('F465', 'Form 465 (Supplemental independent expenditure report)'),
('F496', 'Form 496 (Late independent expenditure report)'),
('F461', 'Form 461 (Independent expenditure committee \
and major donor committee campaign statement)'),
('F460', 'Form 460 (Recipient committee campaign statement)'),
('F497', 'Form 497 (Late contribution report)')
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=4,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule'
)
from_date = fields.DateTimeField(
null=True,
db_column='FROM_DATE',
blank=True,
help_text="Reporting period from date"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office Jurisdiction description if the field JURIS_CD is \
set to city (CIT), county (CTY), local (LOC), or other \
(OTH)."
)
late_rptno = fields.CharField(
max_length=30,
db_column='LATE_RPTNO',
blank=True,
help_text="Identifying Report Number used to distinguish multiple \
reports filed during the same filing period. For example, \
this field allows for multiple form 497s to be filed on the \
same day."
)
# mail_adr1 = fields.CharField(
# max_length=55, db_column='MAIL_ADR1', blank=True
# )
# mail_adr2 = fields.CharField(
# max_length=55, db_column='MAIL_ADR2', blank=True
# )
mail_city = fields.CharField(
max_length=30,
db_column='MAIL_CITY',
blank=True,
help_text="Filer mailing address city"
)
mail_st = fields.CharField(
max_length=4,
db_column='MAIL_ST',
blank=True,
help_text="Filer mailing address state"
)
mail_zip4 = fields.CharField(
max_length=10,
db_column='MAIL_ZIP4',
blank=True,
help_text="Filer mailing address ZIP Code"
)
occupation = fields.CharField(
max_length=60,
db_column='OCCUPATION',
blank=True,
help_text="Occupation. This field is most likely unused."
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office Sought/Held Code. Legal values are "S" for \
sought and "H" for held.'
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description if the field OFFICE_CD is set \
to other (OTH)"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name="Office code",
help_text="Code that identifies the office being sought"
)
other_cb = fields.CharField(
max_length=1,
db_column='OTHER_CB',
blank=True,
help_text="Other entity interests info included check-box. Legal \
values are 'X' and null."
)
other_int = fields.CharField(
max_length=90,
db_column='OTHER_INT',
blank=True,
help_text="Other entity interests description"
)
primfrm_yn = fields.CharField(
max_length=1,
db_column='PRIMFRM_YN',
blank=True,
help_text="Primarily Formed Committee (yes/no) checkbox. Legal \
values are 'Y' or 'N'."
)
REC_TYPE_CHOICES = (
("CVR", "Cover"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
report_num = fields.CharField(
max_length=3,
db_column='REPORT_NUM',
help_text="Amendment number, as reported by the filer \
Report Number 000 represents an original filing. 001-999 are amendments."
)
reportname = fields.CharField(
max_length=3,
db_column='REPORTNAME',
blank=True,
help_text="Attached campaign disclosure statement type. Legal \
values are 450, 460, and 461."
)
rpt_att_cb = fields.CharField(
max_length=4,
db_column='RPT_ATT_CB',
blank=True,
help_text="Committee Report Attached check-box. Legal values \
are 'X' or null. This field applies to the form 401."
)
rpt_date = fields.DateTimeField(
db_column='RPT_DATE',
null=True,
help_text="Date this report was filed, according to the filer"
)
rptfromdt = fields.DateTimeField(
null=True,
db_column='RPTFROMDT',
blank=True,
help_text="Attached campaign disclosure statement - Period from \
date."
)
rptthrudt = fields.DateTimeField(
null=True,
db_column='RPTTHRUDT',
blank=True,
help_text="Attached campaign disclosure statement - Period \
through date."
)
selfemp_cb = fields.CharField(
max_length=1,
db_column='SELFEMP_CB',
blank=True,
help_text="Self employed check-box"
)
sponsor_yn = fields.IntegerField(
null=True,
db_column='SPONSOR_YN',
blank=True,
help_text="Sponsored Committee (yes/no) checkbox. Legal values \
are 'Y' or 'N'."
)
stmt_type = fields.CharField(
max_length=2,
db_column='STMT_TYPE',
blank=True,
help_text='Type of statement'
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text='Support/oppose code. Legal values are "S" for support \
or "O" for oppose.'
)
thru_date = fields.DateTimeField(
null=True,
db_column='THRU_DATE',
blank=True,
help_text='Reporting period through date'
)
# tres_adr1 = fields.CharField(
# max_length=55, db_column='TRES_ADR1', blank=True
# )
# tres_adr2 = fields.CharField(
# max_length=55, db_column='TRES_ADR2', blank=True
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="City portion of the treasurer or responsible \
officer's street address."
)
tres_email = fields.CharField(
max_length=60,
db_column='TRES_EMAIL',
blank=True,
help_text="Treasurer or responsible officer's email"
)
tres_fax = fields.CharField(
max_length=20,
db_column='TRES_FAX',
blank=True,
help_text="Treasurer or responsible officer's fax number"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's prefix or title"
)
tres_phon = fields.CharField(
max_length=20,
db_column='TRES_PHON',
blank=True,
help_text="Treasurer or responsible officer's phone number"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="Treasurer or responsible officer's state"
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text="Treasurer or responsible officer's ZIP Code"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'CVR_CAMPAIGN_DISCLOSURE_CD'
verbose_name = 'CVR_CAMPAIGN_DISCLOSURE_CD'
verbose_name_plural = 'CVR_CAMPAIGN_DISCLOSURE_CD'
def __str__(self):
return str(self.filing_id)
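# Amendments share a FILING_ID, so the current version of a cover page is the
# row with the highest AMEND_ID. A sketch of that lookup (filing ID
# hypothetical):
#
#   from calaccess_raw.models import CvrCampaignDisclosureCd
#
#   latest = (
#       CvrCampaignDisclosureCd.objects
#       .filter(filing_id=1234567)
#       .order_by("-amend_id")
#       .first()
#   )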
@python_2_unicode_compatible
class Cvr2CampaignDisclosureCd(CalAccessBaseModel):
"""
Record used to carry additional names for the campaign
disclosure forms below.
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text="Commitee identification number, when the entity \
is a committee"
)
control_yn = fields.IntegerField(
null=True,
db_column='CONTROL_YN',
blank=True,
help_text='Controlled Committee (yes/no) checkbox. Legal values \
are "Y" or "N".'
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('ATR', 'Assistant treasurer'),
('BNM', 'Ballot measure\'s name/title'),
('CAO', 'Candidate/officeholder'),
('CTL', 'Controlled committee'),
('COM', 'Committee'),
('FIL', 'Candidate filing/ballot fees'),
('OFF', 'Officer (Responsible)'),
('PEX', 'PEX (Unknown)'),
('POF', 'Principal officer'),
('PRO', 'Proponent'),
('RCP', 'Recipient committee'),
('RDP', 'RDP (Unknown)'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
# enty_adr1 = fields.CharField(
# max_length=55, db_column='ENTY_ADR1', blank=True
# )
# enty_adr2 = fields.CharField(
# max_length=55, db_column='ENTY_ADR2', blank=True
# )
enty_city = fields.CharField(
max_length=30,
db_column='ENTY_CITY',
blank=True,
help_text="Entity city"
)
enty_email = fields.CharField(
max_length=60,
db_column='ENTY_EMAIL',
blank=True,
help_text="Entity email address"
)
enty_fax = fields.CharField(
max_length=20,
db_column='ENTY_FAX',
blank=True,
help_text="Entity fax number"
)
enty_namf = fields.CharField(
max_length=45,
db_column='ENTY_NAMF',
blank=True,
help_text="Entity first name, if an individual"
)
enty_naml = fields.CharField(
max_length=200,
db_column='ENTY_NAML',
blank=True,
help_text="Entity name, or last name if an individual"
)
enty_nams = fields.CharField(
max_length=10,
db_column='ENTY_NAMS',
blank=True,
help_text="Entity suffix, if an individual"
)
enty_namt = fields.CharField(
max_length=10,
db_column='ENTY_NAMT',
blank=True,
help_text="Entity prefix or title, if an individual"
)
enty_phon = fields.CharField(
max_length=20,
db_column='ENTY_PHON',
blank=True,
help_text="Entity phone number"
)
enty_st = fields.CharField(
max_length=2,
db_column='ENTY_ST',
blank=True,
help_text="Entity state"
)
enty_zip4 = fields.CharField(
max_length=10,
db_column='ENTY_ZIP4',
blank=True,
help_text="Entity ZIP code"
)
f460_part = fields.CharField(
max_length=2,
db_column='F460_PART',
blank=True,
help_text="Part of 460 cover page coded on ths cvr2 record. Legal \
values are 3, 4a, 4b, 5a, 5b, or 6."
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('F425', 'Form 425 (Semi-annual statement of no activity, \
non-controlled committees)'),
('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
('F460', 'Form 460 (Recipient committee campaign statement)'),
('F465', 'Form 465 (Supplemental independent expenditure report)'),
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=4,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule'
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
# mail_adr1 = fields.CharField(
# max_length=55, db_column='MAIL_ADR1', blank=True
# )
# mail_adr2 = fields.CharField(
# max_length=55, db_column='MAIL_ADR2', blank=True
# )
mail_city = fields.CharField(
max_length=30,
db_column='MAIL_CITY',
blank=True,
help_text="Filer's mailing city"
)
mail_st = fields.CharField(
max_length=2,
db_column='MAIL_ST',
blank=True,
help_text="Filer's mailing state"
)
mail_zip4 = fields.CharField(
max_length=10,
db_column='MAIL_ZIP4',
blank=True,
help_text="Filer's mailing ZIP Code"
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office sought/held code. Indicates if the candidate is an \
incumbent. Legal values are "S" for sought and "H" for held.'
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name="Office code",
help_text="Code that identifies the office being sought"
)
REC_TYPE_CHOICES = (
("CVR2", "Cover, Page 2"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text='Support/Oppose (S/O) code for the ballot measure. \
Legal values are "S" for support or "O" for oppose.'
)
title = fields.CharField(
max_length=90,
db_column='TITLE',
blank=True,
help_text="Official title of filing officer. Applies to the form 465."
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's prefix or title"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'CVR2_CAMPAIGN_DISCLOSURE_CD'
verbose_name = 'CVR2_CAMPAIGN_DISCLOSURE_CD'
verbose_name_plural = 'CVR2_CAMPAIGN_DISCLOSURE_CD'
def __str__(self):
return str(self.filing_id)
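# CVR2 rows carry the additional names for a cover page and are keyed by the
# same FILING_ID/AMEND_ID pair as the CVR record. A join sketch (IDs
# hypothetical):
#
#   from calaccess_raw.models import Cvr2CampaignDisclosureCd
#
#   extras = Cvr2CampaignDisclosureCd.objects.filter(
#       filing_id=1234567,
#       amend_id=0,  # 0 is the original filing
#   ).order_by("line_item")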
@python_2_unicode_compatible
class RcptCd(CalAccessBaseModel):
"""
Receipts schedules for the following forms.
Form 460 (Recipient Committee Campaign Statement)
Schedules A, C, I, and A-1.
Form 401 (Slate Mailer Organization Campaign Statement) Schedule A.
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amount = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMOUNT',
help_text="Amount Received (Monetary, Inkkind, Promise)"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back Reference to a transaction identifier of a parent \
record"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Jurisdiction of ballot measure. Used on the Form 401 \
Schedule A"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name. Used on the Form 401 Schedule A"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter. Used on the Form 401 \
Schedule A"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officeholder's first name. Used on the Form \
401 Schedule A"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder's last name. Used on the Form \
401 Schedule A"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder's name suffix. Used on the Form \
401 Schedule A"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder's name prefix or title. Used on \
the Form 401 Schedule A"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text="Committee Identification number"
)
# ctrib_adr1 = fields.CharField(
# max_length=55,
# db_column='CTRIB_ADR1',
# blank=True,
# default="",
# help_text="First line of the contributor's street address"
# )
# ctrib_adr2 = fields.CharField(
# max_length=55,
# db_column='CTRIB_ADR2',
# blank=True,
# help_text="Second line of the contributor's street address"
# )
ctrib_city = fields.CharField(
max_length=30,
db_column='CTRIB_CITY',
blank=True,
help_text="Contributor's City"
)
ctrib_dscr = fields.CharField(
max_length=90,
db_column='CTRIB_DSCR',
blank=True,
help_text="Description of goods/services received"
)
ctrib_emp = fields.CharField(
max_length=200,
db_column='CTRIB_EMP',
blank=True,
help_text="Employer"
)
ctrib_namf = fields.CharField(
max_length=45,
db_column='CTRIB_NAMF',
blank=True,
help_text="Contributor's First Name"
)
ctrib_naml = fields.CharField(
max_length=200,
db_column='CTRIB_NAML',
help_text="Contributor's last name or business name"
)
ctrib_nams = fields.CharField(
max_length=10,
db_column='CTRIB_NAMS',
blank=True,
help_text="Contributor's Suffix"
)
ctrib_namt = fields.CharField(
max_length=10,
db_column='CTRIB_NAMT',
blank=True,
help_text="Contributor's Prefix or Title"
)
ctrib_occ = fields.CharField(
max_length=60,
db_column='CTRIB_OCC',
blank=True,
help_text="Occupation"
)
ctrib_self = fields.CharField(
max_length=1,
db_column='CTRIB_SELF',
blank=True,
help_text="Self Employed Check-box"
)
ctrib_st = fields.CharField(
max_length=2,
db_column='CTRIB_ST',
blank=True,
help_text="Contributor's State"
)
ctrib_zip4 = fields.CharField(
max_length=10,
db_column='CTRIB_ZIP4',
blank=True,
help_text="Contributor's ZIP+4"
)
cum_oth = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_OTH',
blank=True,
help_text="Cumulative Other (Sched A, A-1)"
)
cum_ytd = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_YTD',
blank=True,
help_text="Cumulative year to date amount (Form 460 Schedule A \
and Form 401 Schedule A, A-1)"
)
date_thru = fields.DateField(
null=True,
db_column='DATE_THRU',
blank=True,
help_text="End of date range for items received"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="Office District Number (used on F401A)"
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
("", "None"),
("0", "0 (Unknown)"),
("BNM", "Ballot measure\'s name/title"),
("COM", "Committee"),
("IND", "Individual"),
("OFF", "Officer (Responsible)"),
("OTH", "Other"),
("PTY", "Political party"),
("RCP", "Recipient commmittee"),
("SCC", "Small contributor committee"),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
help_text="Entity code: Values [CMO|RCP|IND|OTH]",
choices=ENTITY_CODE_CHOICES
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('F900', 'Form 900 (Public employee\'s retirement board, \
candidate campaign statement): Schedule A'),
('A-1', 'Form 460: Schedule A-1, contributions transferred \
to special election committees'),
('E530', 'Form E530 (Issue advocacy receipts)'),
('F496P3', 'Form 496 (Late independent expenditure): \
Part 3, contributions > $100 received'),
('F401A', 'Form 401 (Slate mailer organization): Schedule A, \
payments received'),
('I', 'Form 460 (Recipient committee campaign statement): \
Schedule I, miscellanous increases to cash'),
('C', 'Form 460 (Recipient committee campaign statement): \
Schedule C, non-monetary contributions received'),
('A', 'Form 460 (Recipient committee campaign statement): \
Schedule A, monetary contributions received')
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=9,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule'
)
int_rate = fields.CharField(
max_length=9,
db_column='INT_RATE',
blank=True,
help_text="This field is undocumented"
)
# intr_adr1 = fields.CharField(
# max_length=55,
# db_column='INTR_ADR1',
# blank=True,
# help_text="First line of the intermediary's street address."
# )
# intr_adr2 = fields.CharField(
# max_length=55,
# db_column='INTR_ADR2',
# blank=True,
# help_text="Second line of the Intermediary's street address."
# )
intr_city = fields.CharField(
max_length=30,
db_column='INTR_CITY',
blank=True,
help_text="Intermediary's City"
)
intr_cmteid = fields.CharField(
max_length=9,
db_column='INTR_CMTEID',
blank=True,
help_text="This field is undocumented"
)
intr_emp = fields.CharField(
max_length=200,
db_column='INTR_EMP',
blank=True,
help_text="Intermediary's Employer"
)
intr_namf = fields.CharField(
max_length=45,
db_column='INTR_NAMF',
blank=True,
help_text="Intermediary's First Name"
)
intr_naml = fields.CharField(
max_length=200,
db_column='INTR_NAML',
blank=True,
help_text="Intermediary's Last Name"
)
intr_nams = fields.CharField(
max_length=10,
db_column='INTR_NAMS',
blank=True,
help_text="Intermediary's Suffix"
)
intr_namt = fields.CharField(
max_length=10,
db_column='INTR_NAMT',
blank=True,
help_text="Intermediary's Prefix or Title"
)
intr_occ = fields.CharField(
max_length=60,
db_column='INTR_OCC',
blank=True,
help_text="Intermediary's Occupation"
)
intr_self = fields.CharField(
max_length=1,
db_column='INTR_SELF',
blank=True,
help_text="Intermediary's self employed check box"
)
intr_st = fields.CharField(
max_length=2,
db_column='INTR_ST',
blank=True,
help_text="Intermediary's state"
)
intr_zip4 = fields.CharField(
max_length=10,
db_column='INTR_ZIP4',
blank=True,
help_text="Intermediary's zip code"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code. See the CAL document for the \
list of legal values. Used on Form 401 Schedule A"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office Jurisdiction Description (used on F401A)"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag (Date/Amount are informational only)"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record"
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text="Office Sought/Held Code. Used on the Form 401 \
Schedule A. Legal values are 'S' for sought and 'H' for \
held"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office Sought Description (used on F401A)"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
help_text="Code that identifies the office being sought. See the \
CAL document for a list of valid codes. Used on the \
Form 401 Schedule A)"
)
rcpt_date = fields.DateField(
db_column='RCPT_DATE',
null=True,
help_text="Date item received"
)
REC_TYPE_CHOICES = (
("E530", "E530"),
("RCPT", "RCPT"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support/oppose code. Legal values are 'S' for support \
or 'O' for oppose. Used on Form 401 Sechedule A. \
Transaction identifier - permanent value unique to this item"
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
tran_type = fields.CharField(
max_length=1,
db_column='TRAN_TYPE',
blank=True,
help_text="Transaction Type: Values T- third party | F Forgiven \
loan | R Returned (Negative amount)"
)
# tres_adr1 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR1',
# blank=True,
# help_text="First line of the treasurer or responsible officer's \
# street address"
# )
# tres_adr2 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR2',
# blank=True,
# help_text="Second line of the treasurer or responsible officer's \
# street address"
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="City portion of the treasurer or responsible officer's \
street address"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's prefix or title"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="State portion of the treasurer or responsible officer's \
address"
)
tres_zip4 = fields.CharField(
null=True,
max_length=10,
blank=True,
db_column='TRES_ZIP4',
help_text="Zip code portion of the treasurer or responsible officer's \
address"
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text="Related item on other schedule has same transaction \
identifier. 'X' indicates this condition is true"
)
xref_schnm = fields.CharField(
max_length=2,
db_column='XREF_SCHNM',
blank=True,
help_text="Related record is included on Sched 'B2' or 'F'"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'RCPT_CD'
verbose_name = 'RCPT_CD'
verbose_name_plural = 'RCPT_CD'
def __str__(self):
return str(self.filing_id)
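# RCPT_CD mixes several schedules, so analyses usually filter FORM_TYPE
# first. A sketch totaling Schedule A monetary contributions for one filing
# (filing ID hypothetical; uses Django's aggregation API):
#
#   from django.db.models import Sum
#   from calaccess_raw.models import RcptCd
#
#   total = (
#       RcptCd.objects
#       .filter(filing_id=1234567, form_type="A")
#       .aggregate(total=Sum("amount"))["total"]
#   )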
@python_2_unicode_compatible
class Cvr3VerificationInfoCd(CalAccessBaseModel):
"""
Cover page verification information from campaign disclosure forms
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("CVR3", "CVR3"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F400', 'Form 400 (Statement of organization, \
slate mailer organization)'),
('F401', 'Form 401 (Slate mailer organization campaign statement)'),
('F402', 'Form 402 (Statement of termination, \
slate mailer organization)'),
('F410', 'Form 410 (Statement of organization, recipient committee)'),
('F425', 'Form 425 (Semi-annual statement of no activity, \
non-controlled committees)'),
('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
('F460', 'Form 460 (Recipient committee campaign statement)'),
('F461', 'Form 461 (Independent expenditure and major donor \
committee campaign statement)'),
('F465', 'Form 465 (Supplemental independent expenditure report)'),
('F511', 'Form 511 (Paid spokesman report)'),
('F900', 'Form 900 (Public employee\'s retirement board, \
candidate campaign statement)'),
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
help_text='Name of the source filing form or schedule',
db_index=True,
choices=FORM_TYPE_CHOICES,
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('0', '0 (Unknown)'),
('ATR', 'Assistant treasurer'),
('BBB', 'BBB (Unknown)'),
('COA', 'COA (Unknown)'),
('CAO', 'Candidate/officeholder'),
('CON', 'State controller'),
('MAI', 'MAI (Unknown)'),
('MDI', 'Major donor/independent expenditure'),
('OFF', 'Officer (Responsible)'),
('POF', 'Principal officer'),
('PRO', 'Proponent'),
('RCP', 'Recipient committee'),
('SPO', 'Sponsor'),
('TRE', 'Treasurer'),
)
entity_cd = fields.CharField(
db_column='ENTITY_CD',
max_length=3,
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
sig_date = fields.DateField(
verbose_name='signed date',
db_column='SIG_DATE',
blank=True,
null=True,
help_text='date when signed',
)
sig_loc = fields.CharField(
verbose_name='signed location',
db_column='SIG_LOC',
max_length=39,
blank=True,
help_text='city and state where signed',
)
sig_naml = fields.CharField(
verbose_name='last name',
db_column='SIG_NAML',
max_length=56,
blank=True,
help_text='last name of the signer',
)
sig_namf = fields.CharField(
verbose_name='first name',
db_column='SIG_NAMF',
max_length=45,
blank=True,
help_text='first name of the signer',
)
sig_namt = fields.CharField(
verbose_name='title',
db_column='SIG_NAMT',
max_length=10,
blank=True,
help_text='title of the signer',
)
sig_nams = fields.CharField(
verbose_name='suffix',
db_column='SIG_NAMS',
max_length=8,
blank=True,
help_text='suffix of the signer',
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'CVR3_VERIFICATION_INFO_CD'
verbose_name = 'CVR3_VERIFICATION_INFO_CD'
verbose_name_plural = 'CVR3_VERIFICATION_INFO_CD'
def __str__(self):
return str(self.filing_id)
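# UNIQUE_KEY above documents the composite key for this table; Django does
# not enforce it, but a single-record lookup would mirror it (values
# hypothetical):
#
#   from calaccess_raw.models import Cvr3VerificationInfoCd
#
#   row = Cvr3VerificationInfoCd.objects.get(
#       filing_id=1234567,
#       amend_id=0,
#       line_item=1,
#       rec_type="CVR3",
#       form_type="F460",
#   )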
@python_2_unicode_compatible
class LoanCd(CalAccessBaseModel):
"""
Loans received and made
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back Reference to transaction identifier of parent record"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee identification number"
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('COM', "Committee"),
("IND", "Person (spending > $5,000)"),
("OTH", "Other"),
("PTY", "Political party"),
('RCP', 'Recipient committee'),
('SCC', 'Small contributor committee'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name="entity code",
choices=ENTITY_CODE_CHOICES,
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('B1', 'Form 460 (Recipient committee campaign statement): \
Schedule B1'),
('B2', 'Form 460 (Recipient committee campaign statement): \
Schedule B2'),
('B3', 'Form 460 (Recipient committee campaign statement): \
Schedule B3'),
('H', 'Form 460 (Recipient committee campaign statement): \
Schedule H'),
('H1', 'Form 460 (Recipient committee campaign statement): \
Schedule H1'),
('H2', 'Form 460 (Recipient committee campaign statement): \
Schedule H2'),
('H3', 'Form 460 (Recipient committee campaign statement): \
Schedule H3'),
)
form_type = fields.CharField(
max_length=2,
db_column='FORM_TYPE',
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
# intr_adr1 = fields.CharField(
# max_length=55, db_column='INTR_ADR1', blank=True
# )
# intr_adr2 = fields.CharField(
# max_length=55, db_column='INTR_ADR2', blank=True
# )
intr_city = fields.CharField(
max_length=30,
db_column='INTR_CITY',
blank=True,
help_text="Intermediary's city"
)
intr_namf = fields.CharField(
max_length=45,
db_column='INTR_NAMF',
blank=True,
help_text="Intermediary's first name"
)
intr_naml = fields.CharField(
max_length=200,
db_column='INTR_NAML',
blank=True,
help_text="Intermediary's last name"
)
intr_nams = fields.CharField(
max_length=10,
db_column='INTR_NAMS',
blank=True,
help_text="Intermediary's suffix"
)
intr_namt = fields.CharField(
max_length=10,
db_column='INTR_NAMT',
blank=True,
help_text="Intermediary's title or prefix"
)
intr_st = fields.CharField(
max_length=2,
db_column='INTR_ST',
blank=True,
help_text="Intermediary's state"
)
intr_zip4 = fields.CharField(
max_length=10,
db_column='INTR_ZIP4',
blank=True,
help_text="Intermediary's ZIP Code"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
lndr_namf = fields.CharField(
max_length=45,
db_column='LNDR_NAMF',
blank=True,
help_text="Lender's first name"
)
lndr_naml = fields.CharField(
max_length=200,
db_column='LNDR_NAML',
help_text="Lender's last name or business name"
)
lndr_nams = fields.CharField(
max_length=10,
db_column='LNDR_NAMS',
blank=True,
help_text="Lender's suffix"
)
lndr_namt = fields.CharField(
max_length=10,
db_column='LNDR_NAMT',
blank=True,
help_text="Lender's title or prefix"
)
# loan_adr1 = fields.CharField(
# max_length=55, db_column='LOAN_ADR1', blank=True
# )
# loan_adr2 = fields.CharField(
# max_length=55, db_column='LOAN_ADR2', blank=True
# )
loan_amt1 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT1',
blank=True,
help_text="Repaid or forgiven amount; Original loan amount. The \
content of this column varies based on the \
schedule/part that the record applies to. See the CAL \
document for a description of the value of this field."
)
loan_amt2 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT2',
blank=True,
help_text="Outstanding Principal; unpaid balance. The content of \
this column varies based on the schedule/part that the \
record applies to. See the CAL document for a \
description of the value of this field."
)
loan_amt3 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT3',
blank=True,
help_text="Interest Paid; Unpaid interest; Interest received. The \
content of this column varies based on the \
schedule/part that the record applies to. See the CAL \
document for a description of the value of this field."
)
loan_amt4 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT4',
blank=True,
help_text="Cumulative Amount/Other. The content of this column \
varies based on the schedule/part that the record \
applies to. See the CAL document for a description of the \
value of this field."
)
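# The meaning of LOAN_AMT1 through LOAN_AMT4 shifts with FORM_TYPE. One
# plausible reading for a Form 460 Schedule B1 row (an assumption drawn from
# the help_text above, not a definitive mapping; always confirm against the
# CAL documentation for the schedule at hand):
#
#   loan_amt1  ->  original amount of the loan
#   loan_amt2  ->  outstanding balance
#   loan_amt3  ->  interest paid
#   loan_amt4  ->  cumulative amount to date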
loan_amt5 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT5',
blank=True,
help_text="This field is undocumented"
)
loan_amt6 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT6',
blank=True,
help_text="This field is undocumented"
)
loan_amt7 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT7',
blank=True,
help_text="This field is undocumented"
)
loan_amt8 = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='LOAN_AMT8',
blank=True,
help_text="This field is undocumented"
)
loan_city = fields.CharField(
max_length=30,
db_column='LOAN_CITY',
blank=True,
help_text="Lender's city"
)
loan_date1 = fields.DateField(
db_column='LOAN_DATE1',
null=True,
help_text="Date the loan was made or recieved. The content of this \
column varies based on the schedule/part that the \
record applies to. See the CAL document for a description of the value."
)
loan_date2 = fields.DateField(
null=True,
db_column='LOAN_DATE2',
blank=True,
help_text="Date repaid/forgiven; date loan due. The content of this \
column varies based on the schedule/part that the \
record applies to. See the CAL document for a \
description of the value of this field."
)
loan_emp = fields.CharField(
max_length=200,
db_column='LOAN_EMP',
blank=True,
help_text="Loan employer. Applies to the Form 460 Schedule B \
Part 1."
)
loan_occ = fields.CharField(
max_length=60,
db_column='LOAN_OCC',
blank=True,
help_text="Loan occupation. Applies to the Form 460 Schedule B \
Part 1."
)
loan_rate = fields.CharField(
max_length=30,
db_column='LOAN_RATE',
blank=True,
help_text="Interest Rate. The content of this column varies based \
on the schedule/part that the record applies to. See the \
CAL document for a description of the value of this field."
)
loan_self = fields.CharField(
max_length=1,
db_column='LOAN_SELF',
blank=True,
help_text="Self-employed checkbox"
)
loan_st = fields.CharField(
max_length=2,
db_column='LOAN_ST',
blank=True,
help_text="Lender's state"
)
loan_type = fields.CharField(
max_length=3,
db_column='LOAN_TYPE',
blank=True,
help_text="Type of loan"
)
loan_zip4 = fields.CharField(
max_length=10,
db_column='LOAN_ZIP4',
blank=True,
help_text="Lender's ZIP Code"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record"
)
REC_TYPE_CHOICES = (
("LOAN", "LOAN"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
# tres_adr1 = fields.CharField(
# max_length=55, db_column='TRES_ADR1', blank=True
# )
# tres_adr2 = fields.CharField(
# max_length=55, db_column='TRES_ADR2', blank=True
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="Treasurer or responsible officer's city"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer or responsible officer's first name"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer or responsible officer's last name"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer or responsible officer's suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer or responsible officer's title or prefix"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="Treasurer or responsible officer's street address"
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text="Treasurer or responsible officer's ZIP Code"
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text='Related item on other schedule has same transaction \
identifier. "X" indicates this condition is true.'
)
xref_schnm = fields.CharField(
max_length=2,
db_column='XREF_SCHNM',
blank=True,
help_text="Related record is included on Form 460 Schedule 'A' or 'E'"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'LOAN_CD'
verbose_name = 'LOAN_CD'
verbose_name_plural = 'LOAN_CD'
def __str__(self):
return str(self.filing_id)
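# When XREF_MATCH is "X", a related row on another schedule shares this
# record's TRAN_ID. A cross-reference sketch, given a LoanCd row `loan` whose
# xref_schnm points at Schedule E (held in ExpnCd below; this assumes ExpnCd
# also carries a tran_id column, which is not shown in this excerpt):
#
#   from calaccess_raw.models import ExpnCd
#
#   related = ExpnCd.objects.filter(
#       filing_id=loan.filing_id,
#       amend_id=loan.amend_id,
#       tran_id=loan.tran_id,
#   )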
@python_2_unicode_compatible
class S401Cd(CalAccessBaseModel):
"""
This table contains Form 401 (Slate Mailer Organization) payment and other
disclosure schedule (F401B, F401B-1, F401C, F401D) information.
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("S401", "S401"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F401B', 'Form 401 (Slate mailer organization campaign statement): \
Schedule B, payments made'),
('F401B-1', 'Form 401 (Slate mailer organization campaign statement): \
Schedule B-1, payments made by agent or independent contractor'),
('F401C', 'Form 401 (Slate mailer organization campaign statement): \
Schedule C, persons receiving $1,000 or more'),
('F401D', 'Form 401 (Slate mailer organization campaign statement): \
Schedule D, candidates or measures supported or opposed with < $100 payment'),
)
form_type = fields.CharField(
max_length=7,
db_column='FORM_TYPE',
blank=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
agent_naml = fields.CharField(
max_length=200,
db_column='AGENT_NAML',
blank=True,
help_text="Agent or independent contractor's last name"
)
agent_namf = fields.CharField(
max_length=45,
db_column='AGENT_NAMF',
blank=True,
help_text="Agent or independent contractor's first name"
)
agent_namt = fields.CharField(
max_length=200,
db_column='AGENT_NAMT',
blank=True,
help_text="Agent or independent contractor's title or prefix"
)
agent_nams = fields.CharField(
max_length=10,
db_column='AGENT_NAMS',
blank=True,
help_text="Agent or independent contractor's suffix"
)
payee_naml = fields.CharField(
max_length=200,
db_column='PAYEE_NAML',
blank=True,
help_text="Payee's business name or last name if the payee is an \
individual"
)
payee_namf = fields.CharField(
max_length=45,
db_column='PAYEE_NAMF',
blank=True,
help_text="Payee's first name if the payee is an individual"
)
payee_namt = fields.CharField(
max_length=10,
db_column='PAYEE_NAMT',
blank=True,
help_text="Payee's title or prefix if the payee is an individual"
)
payee_nams = fields.CharField(
max_length=10,
db_column='PAYEE_NAMS',
blank=True,
help_text="Payee's suffix if the payee is an individual"
)
payee_city = fields.CharField(
max_length=30,
db_column='PAYEE_CITY',
blank=True,
help_text="Payee's city address"
)
payee_st = fields.CharField(
max_length=2,
db_column='PAYEE_ST',
blank=True,
help_text="Payee state address"
)
payee_zip4 = fields.CharField(
max_length=10,
db_column='PAYEE_ZIP4',
blank=True,
help_text="Payee ZIP Code"
)
amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMOUNT',
help_text="Amount (Sched F401B, 401B-1, 401C)"
)
aggregate = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AGGREGATE',
help_text="Aggregate year-to-date amount (Sched 401C)"
)
expn_dscr = fields.CharField(
max_length=90,
db_column='EXPN_DSCR',
blank=True,
help_text="Purpose of expense and/or description/explanation"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder last name"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officeholder first name"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder title or prefix"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder suffix"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name="Office code",
help_text="Code that identifies the office being sought"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text="Office sought/held code"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text='Support/oppose code. Legal values are "S" for support \
or "O" for oppose. Used on Form 401.'
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in the TEXT record"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back reference to transaction identifier of parent record"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'S401_CD'
verbose_name = 'S401_CD'
verbose_name_plural = 'S401_CD'
def __str__(self):
return str(self.filing_id)
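# A sketch that totals Form 401 Schedule B payments by payee for one filing
# (filing ID hypothetical; uses Django's values/annotate aggregation):
#
#   from django.db.models import Sum
#   from calaccess_raw.models import S401Cd
#
#   by_payee = (
#       S401Cd.objects
#       .filter(filing_id=1234567, form_type="F401B")
#       .values("payee_naml")
#       .annotate(total=Sum("amount"))
#       .order_by("-total")
#   )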
@python_2_unicode_compatible
class ExpnCd(CalAccessBaseModel):
"""
Campaign expenditures from a variety of forms
"""
agent_namf = fields.CharField(
max_length=45,
db_column='AGENT_NAMF',
blank=True,
help_text="Agent of Ind. Contractor's First name"
)
agent_naml = fields.CharField(
max_length=200,
db_column='AGENT_NAML',
blank=True,
help_text="Agent of Ind. Contractor's Last name (Sched G)"
)
agent_nams = fields.CharField(
max_length=10,
db_column='AGENT_NAMS',
blank=True,
help_text="Agent of Ind. Contractor's Suffix"
)
agent_namt = fields.CharField(
max_length=10,
db_column='AGENT_NAMT',
blank=True,
help_text="Agent of Ind. Contractor's Prefix or Title"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amount = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMOUNT',
help_text="Amount of Payment"
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text="Back Reference to a Tran_ID of a 'parent' record"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Jurisdiction"
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot Measure Name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot Number or Letter"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate's First name"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate's Last name"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate's Suffix"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate's Prefix or Title"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text="Committee ID (If [COM|RCP] & no ID#, Treas info Req.)"
)
cum_oth = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_OTH',
blank=True,
help_text="Cumulative / 'Other' (No Cumulative on Sched E & G)"
)
cum_ytd = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='CUM_YTD',
blank=True,
help_text="Cumulative / Year-to-date amount \
(No Cumulative on Sched E & G)"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="Office District Number (Req. if Juris_Cd=[SEN|ASM|BOE]"
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('0', '0 (Unknown)'),
('COM', 'Committee'),
('RCP', 'Recipient Committee'),
('IND', 'Person (spending > $5,000)'),
('OTH', 'Other'),
('PTY', 'Political party'),
('SCC', 'Small contributor committee'),
('BNM', 'Ballot measure\'s name/title'),
('CAO', 'Candidate/officeholder'),
('OFF', 'Officer'),
('PTH', 'PTH (Unknown)'),
('RFD', 'RFD (Unknown)'),
('MBR', 'MBR (Unknown)'),
)
entity_cd = fields.CharField(
choices=ENTITY_CODE_CHOICES,
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
)
expn_chkno = fields.CharField(
max_length=20,
db_column='EXPN_CHKNO',
blank=True,
help_text="Check Number (Optional)"
)
expn_code = fields.CharField(
max_length=3,
db_column='EXPN_CODE',
blank=True,
help_text="Expense Code - Values: (Refer to list in Overview) \
Note: CTB & IND need explanation & listing on Sched D. TRC & TRS require \
explanation."
)
expn_date = fields.DateField(
null=True,
db_column='EXPN_DATE',
blank=True,
help_text="Date of Expenditure (Note: Date not on Sched E & G)"
)
expn_dscr = fields.CharField(
max_length=400,
db_column='EXPN_DSCR',
blank=True,
help_text="Purpose of Expense and/or Description/explanation"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
FORM_TYPE_CHOICES = (
('D', 'Form 460 (Recipient committee campaign statement): \
Schedule D, summary of expenditure supporting/opposing other candidates, \
measures and committees'),
('E', 'Form 460 (Recipient committee campaign statement): \
Schedule E, payments made'),
('G', 'Form 460 (Recipient committee campaign statement): \
Schedule G, payments made by agent of independent contractor'),
('F450P5', 'Form 450 (Recipient Committee Campaign Statement \
Short Form): Part 5, payments made'),
('F461P5', 'Form 461 (Independent expenditure and major donor \
committee campaign statement): Part 5, contributions and expenditures made'),
('F465P3', 'Form 465 (Supplemental independent expenditure \
report): Part 3, independent expenditures made'),
('F900', 'Form 900 (Public Employee\'s Retirement Board Candidate \
Campaign Statement), Schedule B, expenditures made'),
)
form_type = fields.CharField(
choices=FORM_TYPE_CHOICES,
max_length=6,
db_column='FORM_TYPE',
help_text='Name of the source filing form or schedule'
)
g_from_e_f = fields.CharField(
max_length=1,
db_column='G_FROM_E_F',
blank=True,
help_text="Back Reference from Sched G to Sched 'E' or 'F'?"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office Jurisdiction Code Values: STW=Statewide; \
SEN=Senate District; ASM=Assembly District; \
BOE=Board of Equalization District; \
CIT=City; CTY=County; LOC=Local; OTH=Other"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office Jurisdiction Description \
(Req. if Juris_Cd=[CIT|CTY|LOC|OTH]"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo Amount? (Date/Amount are informational only)"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record."
)
OFF_S_H_CD_CHOICES = (
('H', 'Office Held'),
('S', 'Office Sought'),
('A', 'A - Unknown'),
('8', '8 - Unknown'),
('O', 'O - Unknown'),
)
off_s_h_cd = fields.CharField(
choices=OFF_S_H_CD_CHOICES,
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text="Office Sought/Held Code: H=Held; S=Sought"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office Sought Description (Req. if Office_Cd=OTH)"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
help_text="Office Sought (See table of code in Overview)"
)
# payee_adr1 = fields.CharField(
# max_length=55,
# db_column='PAYEE_ADR1',
# blank=True,
# help_text="Address of Payee"
# )
# payee_adr2 = fields.CharField(
# max_length=55,
# db_column='PAYEE_ADR2',
# blank=True,
# help_text="Optional 2nd line of Address"
# )
payee_city = fields.CharField(
max_length=30,
db_column='PAYEE_CITY',
blank=True,
help_text="Payee City"
)
payee_namf = fields.CharField(
max_length=45,
db_column='PAYEE_NAMF',
blank=True,
help_text="Payee's First name"
)
payee_naml = fields.CharField(
max_length=200,
db_column='PAYEE_NAML',
blank=True,
help_text="Payee's Last name"
)
payee_nams = fields.CharField(
max_length=10,
db_column='PAYEE_NAMS',
blank=True,
help_text="Payee's Suffix"
)
payee_namt = fields.CharField(
max_length=10,
db_column='PAYEE_NAMT',
blank=True,
help_text="Payee's Prefix or Title"
)
payee_st = fields.CharField(
max_length=2,
db_column='PAYEE_ST',
blank=True,
help_text="State code"
)
payee_zip4 = fields.CharField(
max_length=10,
db_column='PAYEE_ZIP4',
blank=True,
help_text="Zip+4"
)
REC_TYPE_CHOICES = (
("EXPN", "EXPN"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="Support/Oppose? Values: S; O (F450, F461)"
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
# tres_adr1 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR1',
# blank=True,
# help_text="Treasurer Street 1(Req if [COM|RCP] & no ID#)"
# )
# tres_adr2 = fields.CharField(
# max_length=55,
# db_column='TRES_ADR2',
# blank=True,
# help_text="Treasurer Street 2"
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text="Treasurer City"
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text="Treasurer's First name (Req if [COM|RCP] & no ID#)"
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text="Treasurer's Last name (Req if [COM|RCP] & no ID#)"
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text="Treasurer's Suffix"
)
tres_namt = fields.CharField(
max_length=10,
db_column='TRES_NAMT',
blank=True,
help_text="Treasurer's Prefix or Title"
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text="Treasurer State"
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text="Treasurer ZIP+4"
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text="X = Related item on other Sched has same Tran_ID"
)
xref_schnm = fields.CharField(
max_length=2,
db_column='XREF_SCHNM',
blank=True,
help_text="Related item is included on Sched 'C' or 'H2'"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'EXPN_CD'
verbose_name = 'EXPN_CD'
verbose_name_plural = 'EXPN_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class F495P2Cd(CalAccessBaseModel):
"""
F495 Supplemental Preelection Campaign Statement
    It is an attachment to the forms below:
F450 Recipient Committee Campaign Statement Short Form
F460 Recipient Committee Campaign Statement
Form 495 is for use by a recipient committee that
makes contributions totaling $10,000 or more in
connection with an election for which the committee
is not required to file regular preelection reports.
Form 495 is filed as an attachment to a campaign
disclosure statement (Form 450 or 460). On the
Form 450 or 460, the committee will report all
contributions received and expenditures made since
its last report.
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
('F495', 'F495'),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
('F460', 'Form 460 (Recipient committee campaign statement)'),
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
elect_date = fields.DateField(
db_column='ELECT_DATE',
blank=True,
null=True,
help_text="Date of the General Election This date will be the same \
as on the filing's cover (CVR) record."
)
electjuris = fields.CharField(
db_column='ELECTJURIS',
max_length=40,
help_text="Jurisdiction of the election"
)
contribamt = fields.FloatField(
db_column='CONTRIBAMT',
help_text="Contribution amount (For the period of 6 months prior to \
17 days before the election)"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'F495P2_CD'
verbose_name = 'F495P2_CD'
verbose_name_plural = 'F495P2_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class DebtCd(CalAccessBaseModel):
"""
Form 460 (Recipient Committee Campaign Statement)
Schedule (F) Accrued Expenses (Unpaid Bills) records
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
amt_incur = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMT_INCUR',
help_text='Amount incurred this period',
)
amt_paid = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='AMT_PAID',
help_text='Amount paid this period.'
)
bakref_tid = fields.CharField(
max_length=20,
db_column='BAKREF_TID',
blank=True,
help_text='Back reference to a transaction identifier \
of a parent record.'
)
beg_bal = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='BEG_BAL',
help_text='Outstanding balance at beginning of period',
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
help_text='Committee identification number',
)
end_bal = fields.DecimalField(
decimal_places=2,
max_digits=14,
db_column='END_BAL',
help_text='Outstanding balance at close of this period',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('BNM', 'Ballot measure\'s name/title'),
('COM', 'Committee'),
('IND', 'Person (spending > $5,000)'),
('OTH', 'Other'),
('PTY', 'Political party'),
('RCP', 'Recipient Committee'),
('SCC', 'Small contributor committee'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
help_text='Entity code of the payee',
)
expn_code = fields.CharField(
max_length=3,
db_column='EXPN_CODE',
blank=True,
help_text='Expense code',
)
expn_dscr = fields.CharField(
max_length=400,
db_column='EXPN_DSCR',
blank=True,
help_text='Purpose of expense and/or description/explanation',
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number of the parent filing",
)
FORM_TYPE_CHOICES = (
('F', 'Form 460 (Recipient committee campaign statement): \
Schedule F, accrued expenses (unpaid bills)'),
)
form_type = fields.CharField(
max_length=1,
db_column='FORM_TYPE',
choices=FORM_TYPE_CHOICES,
help_text='Schedule Name/ID: (F - Sched F / Accrued Expenses)'
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Record line item number",
db_index=True,
)
memo_code = fields.CharField(
max_length=1, db_column='MEMO_CODE', blank=True,
help_text='Memo amount flag',
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text='Reference to text contained in a TEXT record.'
)
# payee_adr1 = fields.CharField(
# max_length=55, db_column='PAYEE_ADR1', blank=True
# )
# payee_adr2 = fields.CharField(
# max_length=55, db_column='PAYEE_ADR2', blank=True
# )
payee_city = fields.CharField(
max_length=30,
db_column='PAYEE_CITY',
blank=True,
help_text='First line of the payee\'s street address',
)
payee_namf = fields.CharField(
max_length=45,
db_column='PAYEE_NAMF',
blank=True,
help_text='Payee\'s first name if the payee is an individual',
)
payee_naml = fields.CharField(
max_length=200,
db_column='PAYEE_NAML',
help_text="Payee's business name or last name if the payee is an \
individual."
)
payee_nams = fields.CharField(
max_length=10,
db_column='PAYEE_NAMS',
blank=True,
help_text='Payee\'s name suffix if the payee is an individual',
)
payee_namt = fields.CharField(
max_length=100,
db_column='PAYEE_NAMT',
blank=True,
help_text='Payee\'s prefix or title if the payee is an individual',
)
payee_st = fields.CharField(
max_length=2,
db_column='PAYEE_ST',
blank=True,
help_text='Payee\'s state',
)
payee_zip4 = fields.CharField(
max_length=10,
db_column='PAYEE_ZIP4',
blank=True,
help_text='Payee\'s ZIP Code',
)
REC_TYPE_CHOICES = (
("DEBT", "DEBT"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
help_text='Record type value: DEBT',
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Transaction identifier - permanent value unique to \
this item',
)
# tres_adr1 = fields.CharField(
# max_length=55, db_column='TRES_ADR1', blank=True
# )
# tres_adr2 = fields.CharField(
# max_length=55, db_column='TRES_ADR2', blank=True
# )
tres_city = fields.CharField(
max_length=30,
db_column='TRES_CITY',
blank=True,
help_text='City portion of the treasurer or responsible \
officer\'s street address',
)
tres_namf = fields.CharField(
max_length=45,
db_column='TRES_NAMF',
blank=True,
help_text='Treasurer or responsible officer\'s first name'
)
tres_naml = fields.CharField(
max_length=200,
db_column='TRES_NAML',
blank=True,
help_text='Treasurer or responsible officer\'s last name'
)
tres_nams = fields.CharField(
max_length=10,
db_column='TRES_NAMS',
blank=True,
help_text='Treasurer or responsible officer\'s suffix',
)
tres_namt = fields.CharField(
max_length=100,
db_column='TRES_NAMT',
blank=True,
help_text='Treasurer or responsible officer\'s prefix or title',
)
tres_st = fields.CharField(
max_length=2,
db_column='TRES_ST',
blank=True,
help_text='State portion of the treasurer or responsible \
officer\'s address',
)
tres_zip4 = fields.CharField(
max_length=10,
db_column='TRES_ZIP4',
blank=True,
help_text='ZIP Code portion of the treasurer or responsible \
officer\'s address',
)
xref_match = fields.CharField(
max_length=1,
db_column='XREF_MATCH',
blank=True,
help_text='Related item on other schedule has same \
transaction identifier. /"X/" indicates this condition is true'
)
xref_schnm = fields.CharField(
max_length=2, db_column='XREF_SCHNM', blank=True,
help_text='Related record is included on Schedule C.'
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'DEBT_CD'
verbose_name = 'DEBT_CD'
verbose_name_plural = 'DEBT_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class S496Cd(CalAccessBaseModel):
"""
Form 496 Late Independent Expenditures
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
('S496', 'S496'),
)
rec_type = fields.CharField(
verbose_name='record type',
max_length=4,
db_column='REC_TYPE',
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F496', 'F496 (Late independent expenditure report)'),
)
form_type = fields.CharField(
max_length=4, db_column='FORM_TYPE', blank=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMOUNT',
help_text="Expenditure amount"
)
exp_date = fields.DateField(
db_column='EXP_DATE',
null=True,
help_text="Expenditure dates"
)
expn_dscr = fields.CharField(
max_length=90,
db_column='EXPN_DSCR',
blank=True,
help_text="Purpose of expense and/or description/explanation"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in a TEXT record"
)
date_thru = fields.DateField(
db_column='DATE_THRU',
null=True,
help_text="End of date range for items paid"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'S496_CD'
verbose_name = 'S496_CD'
verbose_name_plural = 'S496_CD'
def __str__(self):
return "{} Filing {}, Amendment {}".format(
self.form_type,
self.filing_id,
self.amend_id
)
@python_2_unicode_compatible
class SpltCd(CalAccessBaseModel):
"""
Split records
"""
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
elec_amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='ELEC_AMOUNT',
help_text="This field is undocumented"
)
elec_code = fields.CharField(
max_length=2,
db_column='ELEC_CODE',
blank=True,
help_text='This field is undocumented',
)
elec_date = fields.DateField(
db_column='ELEC_DATE',
null=True,
help_text="This field is undocumented"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
PFORM_TYPE_CHOICES = (
('A', ''),
('B1', ''),
('B2', ''),
('C', ''),
('D', ''),
('F450P5', ''),
('H', ''),
)
pform_type = fields.CharField(
max_length=7,
db_column='PFORM_TYPE',
db_index=True,
choices=PFORM_TYPE_CHOICES,
help_text='This field is undocumented',
)
ptran_id = fields.CharField(
verbose_name='transaction ID',
max_length=32,
db_column='PTRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'SPLT_CD'
verbose_name = 'SPLT_CD'
verbose_name_plural = 'SPLT_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class S497Cd(CalAccessBaseModel):
"""
Form 497: Late Contributions Received/Made
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("S497", "S497"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F497P1', 'Form 497 (Late contribution report): \
Part 1, late contributions received'),
('F497P2', 'Form 497 (Late contribution report): \
Part 2, late contributions made')
)
form_type = fields.CharField(
max_length=6,
db_column='FORM_TYPE',
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
('0', '0 (Unknown)'),
('BNM', 'Ballot measure\'s name/title'),
        ('CAO', 'Candidate/officeholder'),
('CTL', 'Controlled committee'),
('COM', 'Committee'),
('IND', 'Person (spending > $5,000)'),
('OFF', 'Officer'),
('OTH', 'Other'),
('PTY', 'Political party'),
('RCP', 'Recipient Committee'),
('SCC', 'Small contributor committee'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
enty_naml = fields.CharField(
max_length=200,
db_column='ENTY_NAML',
blank=True,
help_text="Entity's last name or business name"
)
enty_namf = fields.CharField(
max_length=45,
db_column='ENTY_NAMF',
blank=True,
help_text="Entity's first name"
)
enty_namt = fields.CharField(
max_length=10,
db_column='ENTY_NAMT',
blank=True,
help_text="Entity's title or prefix"
)
enty_nams = fields.CharField(
max_length=10,
db_column='ENTY_NAMS',
blank=True,
help_text="Entity's suffix"
)
enty_city = fields.CharField(
max_length=30,
db_column='ENTY_CITY',
blank=True,
help_text="Filing committee's city address"
)
enty_st = fields.CharField(
max_length=2,
db_column='ENTY_ST',
blank=True,
help_text="Filing committee's state address"
)
enty_zip4 = fields.CharField(
max_length=10,
db_column='ENTY_ZIP4',
blank=True,
help_text="Filing committee's ZIP Code"
)
ctrib_emp = fields.CharField(
max_length=200,
db_column='CTRIB_EMP',
blank=True,
help_text="Employer"
)
ctrib_occ = fields.CharField(
max_length=60,
db_column='CTRIB_OCC',
blank=True,
help_text="Occupation"
)
ctrib_self = fields.CharField(
max_length=1,
db_column='CTRIB_SELF',
blank=True,
        help_text='Self-employed checkbox. "X" indicates the contributor is \
self-employed.'
)
elec_date = fields.DateField(
db_column='ELEC_DATE',
null=True,
help_text="Date of election"
)
ctrib_date = fields.DateField(
db_column='CTRIB_DATE',
null=True,
help_text="Date item received/made"
)
date_thru = fields.DateField(
db_column='DATE_THRU',
null=True,
help_text="End of date range for items received"
)
amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMOUNT',
help_text="Amount received/made"
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee identification number"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officeholder's last name"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officeholder's first name"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officeholder's title or prefix"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officeholder's suffix"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name="Office code",
help_text="Office sought code"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Office sought description"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
verbose_name="Jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. Populated \
for Senate, Assembly, or Board of Equalization races."
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office Sought/Held Code. Legal values are "S" for \
sought and "H" for held.'
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number"
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Ballot measure jurisdiction"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flag"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text="Reference to text contained in TEXT code"
)
bal_id = fields.CharField(
max_length=9,
db_column='BAL_ID',
blank=True,
help_text="This field is undocumented"
)
cand_id = fields.CharField(
max_length=9,
db_column='CAND_ID',
blank=True,
help_text="This field is undocumented"
)
sup_off_cd = fields.CharField(
max_length=1,
db_column='SUP_OFF_CD',
blank=True,
help_text="This field is undocumented"
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text="This field is undocumented"
)
def __str__(self):
return "{} Filing {}, Amendment {}".format(
self.get_form_type_display(),
self.filing_id,
self.amend_id
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'S497_CD'
verbose_name = 'S497_CD'
verbose_name_plural = 'S497_CD'
@python_2_unicode_compatible
class F501502Cd(CalAccessBaseModel):
"""
Candidate intention statement
"""
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
REC_TYPE_CHOICES = (
("CVR", "CVR"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F501', 'Form 501 (Candidate intention statement)'),
('F502', 'Form 502 (Campaign bank account statement)')
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
filer_id = fields.CharField(
verbose_name='filer ID',
db_column='FILER_ID',
max_length=9,
blank=True,
db_index=True,
help_text="Filer's unique identification number",
)
committee_id = fields.CharField(
db_column='COMMITTEE_ID',
max_length=8,
blank=True,
verbose_name="Committee ID",
help_text='Committee identification number'
)
entity_cd = fields.CharField(
db_column='ENTITY_CD',
blank=True,
max_length=3,
help_text='Entity code'
)
report_num = fields.IntegerField(
db_column='REPORT_NUM',
blank=True,
null=True,
help_text='Report Number; 000 Original; 001-999 Amended'
)
rpt_date = fields.DateTimeField(
db_column='RPT_DATE',
blank=True,
null=True,
help_text='date this report is filed'
)
stmt_type = fields.IntegerField(
db_column='STMT_TYPE',
help_text="Type of statement"
)
from_date = fields.CharField(
db_column='FROM_DATE',
max_length=32,
blank=True,
help_text='Reporting period from date'
)
thru_date = fields.CharField(
db_column='THRU_DATE',
max_length=32,
blank=True,
help_text="Reporting period through date"
)
elect_date = fields.CharField(
db_column='ELECT_DATE',
max_length=32,
blank=True,
help_text='Date of election'
)
cand_naml = fields.CharField(
db_column='CAND_NAML',
max_length=81,
blank=True,
help_text="Candidate/officerholder last name"
)
cand_namf = fields.CharField(
db_column='CAND_NAMF',
max_length=25,
blank=True,
help_text="Candidate/officerholder first name"
)
can_namm = fields.CharField(
db_column='CAN_NAMM',
max_length=10,
blank=True,
help_text='Candidate/officeholder middle name'
)
cand_namt = fields.CharField(
db_column='CAND_NAMT',
max_length=7,
blank=True,
help_text="Candidate/officerholder title or prefix"
)
cand_nams = fields.CharField(
db_column='CAND_NAMS',
max_length=7,
blank=True,
help_text="Candidate/officeholder suffix"
)
moniker_pos = fields.CharField(
db_column='MONIKER_POS',
max_length=32,
blank=True,
help_text="Location of the candidate/officeholder's moniker"
)
moniker = fields.CharField(
db_column='MONIKER',
max_length=4,
blank=True,
help_text="Candidate/officeholder's moniker"
)
cand_city = fields.CharField(
db_column='CAND_CITY',
max_length=22,
blank=True,
help_text="Candidate/officerholder city"
)
cand_st = fields.CharField(
db_column='CAND_ST',
max_length=4,
blank=True,
help_text='Candidate/officeholder state'
)
cand_zip4 = fields.CharField(
db_column='CAND_ZIP4',
max_length=10,
blank=True,
help_text='Candidate/officeholder zip +4'
)
cand_phon = fields.CharField(
db_column='CAND_PHON',
max_length=14,
blank=True,
help_text='Candidate/officeholder phone number'
)
cand_fax = fields.CharField(
db_column='CAND_FAX',
max_length=14,
blank=True,
help_text="Candidate/officerholder fax"
)
cand_email = fields.CharField(
db_column='CAND_EMAIL',
max_length=37,
blank=True,
help_text='Candidate/officeholder email address'
)
fin_naml = fields.CharField(
db_column='FIN_NAML',
max_length=53,
blank=True,
help_text="Financial institution's business name"
)
fin_namf = fields.CharField(
db_column='FIN_NAMF',
max_length=32,
blank=True,
help_text="Unused. Financial institution's first name."
)
fin_namt = fields.CharField(
db_column='FIN_NAMT',
max_length=32,
blank=True,
help_text="Unused. Financial institution's title."
)
fin_nams = fields.CharField(
db_column='FIN_NAMS',
max_length=32,
blank=True,
help_text="Unused. Financial institution's suffix."
)
fin_city = fields.CharField(
db_column='FIN_CITY',
max_length=20,
blank=True,
help_text="Financial institution's city."
)
fin_st = fields.CharField(
db_column='FIN_ST',
max_length=4,
blank=True,
help_text="Financial institution's state."
)
fin_zip4 = fields.CharField(
db_column='FIN_ZIP4',
max_length=9,
blank=True,
help_text="Financial institution's zip code."
)
fin_phon = fields.CharField(
db_column='FIN_PHON',
max_length=14,
blank=True,
help_text="Financial institution's phone number."
)
fin_fax = fields.CharField(
db_column='FIN_FAX',
max_length=10,
blank=True,
help_text="Financial institution's FAX Number."
)
fin_email = fields.CharField(
db_column='FIN_EMAIL',
max_length=15,
blank=True,
help_text="Financial institution's e-mail address."
)
office_cd = fields.IntegerField(
db_column='OFFICE_CD',
help_text="Office sought code"
)
offic_dscr = fields.CharField(
db_column='OFFIC_DSCR',
max_length=50,
blank=True,
help_text="Office sought description"
)
agency_nam = fields.CharField(
db_column='AGENCY_NAM',
max_length=63,
blank=True,
help_text="Agency name"
)
juris_cd = fields.IntegerField(
db_column='JURIS_CD',
blank=True,
null=True,
help_text='Office jurisdiction code'
)
juris_dscr = fields.CharField(
db_column='JURIS_DSCR',
max_length=14,
blank=True,
help_text='office jurisdiction description'
)
dist_no = fields.CharField(
db_column='DIST_NO',
max_length=4,
blank=True,
help_text='District number for the office being sought. \
Populated for Senate, Assembly or Board of Equalization races.'
)
party = fields.CharField(
db_column='PARTY',
max_length=20,
blank=True,
help_text="Political party"
)
yr_of_elec = fields.IntegerField(
db_column='YR_OF_ELEC',
blank=True,
null=True,
help_text='Year of election'
)
elec_type = fields.IntegerField(
db_column='ELEC_TYPE',
blank=True,
null=True,
verbose_name="Election type"
)
execute_dt = fields.DateTimeField(
db_column='EXECUTE_DT',
blank=True,
null=True,
help_text='Execution date'
)
can_sig = fields.CharField(
db_column='CAN_SIG',
max_length=13,
blank=True,
help_text='Candidate signature'
)
account_no = fields.CharField(
db_column='ACCOUNT_NO',
max_length=22,
blank=True,
help_text='Account number'
)
acct_op_dt = fields.DateField(
db_column='ACCT_OP_DT',
blank=True,
null=True,
help_text='Account open date'
)
party_cd = fields.IntegerField(
db_column='PARTY_CD',
blank=True,
null=True,
help_text="Party code"
)
district_cd = fields.IntegerField(
db_column='DISTRICT_CD',
blank=True,
null=True,
help_text='District number for the office being sought. \
Populated for Senate, Assembly, or Board of Equalization races.'
)
accept_limit_yn = fields.IntegerField(
db_column='ACCEPT_LIMIT_YN',
blank=True,
null=True,
help_text='This field is undocumented'
)
did_exceed_dt = fields.DateField(
db_column='DID_EXCEED_DT',
blank=True,
null=True,
help_text='This field is undocumented'
)
cntrb_prsnl_fnds_dt = fields.DateField(
db_column='CNTRB_PRSNL_FNDS_DT',
blank=True,
null=True,
help_text="This field is undocumented"
)
def __str__(self):
return str(self.filing_id)
class Meta:
app_label = 'calaccess_raw'
db_table = 'F501_502_CD'
verbose_name = 'F501_502_CD'
verbose_name_plural = 'F501_502_CD'
@python_2_unicode_compatible
class S498Cd(CalAccessBaseModel):
"""
Form 498: Slate Mailer Late Independent Expenditures Made
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE",
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
help_text="Unique filing identificiation number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
("S498", "S498"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('F498-A', 'Form 498 (Slate mailer late payment report): \
Part A: late payments attributed to'),
('F498-R', 'Form 498 (Slate mailer late payment report): \
Part R: late payments received from')
)
form_type = fields.CharField(
max_length=9,
db_column='FORM_TYPE',
blank=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
tran_id = fields.CharField(
verbose_name='transaction ID',
max_length=20,
db_column='TRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-\
# format.html#document/p9
('', 'Unknown'),
        ('CAO', 'Candidate/officeholder'),
('COM', 'Committee'),
('IND', 'Person (spending > $5,000)'),
('OTH', 'Other'),
('RCP', 'Recipient Committee'),
)
entity_cd = fields.CharField(
max_length=3,
db_column='ENTITY_CD',
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES,
)
cmte_id = fields.CharField(
max_length=9,
db_column='CMTE_ID',
blank=True,
verbose_name="Committee ID",
help_text="Committee identification number"
)
payor_naml = fields.CharField(
max_length=200,
db_column='PAYOR_NAML',
blank=True,
help_text="Payor's last name or business name"
)
payor_namf = fields.CharField(
max_length=45,
db_column='PAYOR_NAMF',
blank=True,
help_text="Payor's first name."
)
payor_namt = fields.CharField(
max_length=10,
db_column='PAYOR_NAMT',
blank=True,
help_text="Payor's Prefix or title."
)
payor_nams = fields.CharField(
max_length=10,
db_column='PAYOR_NAMS',
blank=True,
help_text="Payor's suffix."
)
payor_city = fields.CharField(
max_length=30,
db_column='PAYOR_CITY',
blank=True,
help_text="Payor's city."
)
payor_st = fields.CharField(
max_length=2,
db_column='PAYOR_ST',
blank=True,
help_text="Payor's State."
)
payor_zip4 = fields.CharField(
max_length=10,
db_column='PAYOR_ZIP4',
blank=True,
help_text="Payor's zip code"
)
date_rcvd = fields.DateField(
db_column='DATE_RCVD',
null=True,
help_text="Date received"
)
amt_rcvd = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMT_RCVD',
help_text="Amount received"
)
cand_naml = fields.CharField(
max_length=200,
db_column='CAND_NAML',
blank=True,
help_text="Candidate/officerholder last name"
)
cand_namf = fields.CharField(
max_length=45,
db_column='CAND_NAMF',
blank=True,
help_text="Candidate/officerholder first name"
)
cand_namt = fields.CharField(
max_length=10,
db_column='CAND_NAMT',
blank=True,
help_text="Candidate/officerholder title or prefix"
)
cand_nams = fields.CharField(
max_length=10,
db_column='CAND_NAMS',
blank=True,
help_text="Candidate/officerholder suffix"
)
office_cd = fields.CharField(
max_length=3,
db_column='OFFICE_CD',
blank=True,
verbose_name='Office code',
help_text="Code that identifies the office being sought"
)
offic_dscr = fields.CharField(
max_length=40,
db_column='OFFIC_DSCR',
blank=True,
help_text="Description of office sought"
)
juris_cd = fields.CharField(
max_length=3,
db_column='JURIS_CD',
blank=True,
help_text="Office jurisdiction code"
)
juris_dscr = fields.CharField(
max_length=40,
db_column='JURIS_DSCR',
blank=True,
help_text="Office jurisdiction description"
)
dist_no = fields.CharField(
max_length=3,
db_column='DIST_NO',
blank=True,
help_text="District number for the office being sought. \
Populated for Senate, Assembly, or Board of Equalization races."
)
off_s_h_cd = fields.CharField(
max_length=1,
db_column='OFF_S_H_CD',
blank=True,
help_text='Office Sought/Held Code. Legal values are "S" for \
sought and "H" for held'
)
bal_name = fields.CharField(
max_length=200,
db_column='BAL_NAME',
blank=True,
help_text="Ballot measure name"
)
bal_num = fields.CharField(
max_length=7,
db_column='BAL_NUM',
blank=True,
help_text="Ballot measure number or letter."
)
bal_juris = fields.CharField(
max_length=40,
db_column='BAL_JURIS',
blank=True,
help_text="Jurisdiction of ballot measure"
)
sup_opp_cd = fields.CharField(
max_length=1,
db_column='SUP_OPP_CD',
blank=True,
help_text='Support/oppose code. Legal values are "S" for support \
or "O" for oppose.'
)
amt_attrib = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='AMT_ATTRIB',
help_text="Amount attributed (only if Form_type = 'F498-A')"
)
memo_code = fields.CharField(
max_length=1,
db_column='MEMO_CODE',
blank=True,
help_text="Memo amount flat"
)
memo_refno = fields.CharField(
max_length=20,
db_column='MEMO_REFNO',
blank=True,
help_text='Reference text contained in TEXT record'
)
employer = fields.CharField(
max_length=200,
db_column='EMPLOYER',
blank=True,
help_text="This field is undocumented"
)
occupation = fields.CharField(
max_length=60,
db_column='OCCUPATION',
blank=True,
help_text='This field is undocumented'
)
selfemp_cb = fields.CharField(
max_length=1,
db_column='SELFEMP_CB',
blank=True,
help_text='Self-employed checkbox'
)
def __str__(self):
return str(self.filing_id)
class Meta:
app_label = 'calaccess_raw'
db_table = 'S498_CD'
verbose_name = 'S498_CD'
verbose_name_plural = 'S498_CD'
| mit |
QijunPan/ansible | lib/ansible/modules/system/osx_defaults.py | 25 | 14472 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, GeekChimp - Franck Nijhof <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: osx_defaults
author: Franck Nijhof (@frenck)
short_description: osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible
description:
- osx_defaults allows users to read, write, and delete Mac OS X user defaults from Ansible scripts.
Mac OS X applications and other programs use the defaults system to record user preferences and other
information that must be maintained when the applications aren't running (such as default font for new
documents, or the position of an Info panel).
version_added: "2.0"
options:
domain:
description:
- The domain is a domain name of the form com.companyname.appname.
required: false
default: NSGlobalDomain
host:
description:
- The host on which the preference should apply. The special value "currentHost" corresponds to the
"-currentHost" switch of the defaults commandline tool.
required: false
default: null
version_added: "2.1"
key:
description:
- The key of the user preference
required: true
type:
description:
- The type of value to write.
required: false
default: string
choices: [ "array", "bool", "boolean", "date", "float", "int", "integer", "string" ]
array_add:
description:
- Add new elements to the array for a key which has an array as its value.
required: false
default: false
choices: [ "true", "false" ]
value:
description:
- The value to write. Only required when state = present.
required: false
default: null
state:
description:
- The state of the user defaults
required: false
default: present
choices: [ "present", "absent" ]
notes:
    - Mac OS X caches defaults. You may need to log out and log back in to apply the changes.
'''
EXAMPLES = '''
- osx_defaults:
domain: com.apple.Safari
key: IncludeInternalDebugMenu
type: bool
value: true
state: present
- osx_defaults:
domain: NSGlobalDomain
key: AppleMeasurementUnits
type: string
value: Centimeters
state: present
- osx_defaults:
domain: com.apple.screensaver
host: currentHost
key: showClock
type: int
value: 1
- osx_defaults:
key: AppleMeasurementUnits
type: string
value: Centimeters
- osx_defaults:
key: AppleLanguages
type: array
value:
- en
- nl
- osx_defaults:
domain: com.geekchimp.macable
key: ExampleKeyToRemove
state: absent
'''
import datetime
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
# exceptions --------------------------------------------------------------- {{{
class OSXDefaultsException(Exception):
pass
# /exceptions -------------------------------------------------------------- }}}
# class MacDefaults -------------------------------------------------------- {{{
class OSXDefaults(object):
""" Class to manage Mac OS user defaults """
# init ---------------------------------------------------------------- {{{
""" Initialize this module. Finds 'defaults' executable and preps the parameters """
def __init__(self, **kwargs):
# Initial var for storing current defaults value
self.current_value = None
# Just set all given parameters
for key, val in kwargs.items():
setattr(self, key, val)
# Try to find the defaults executable
self.executable = self.module.get_bin_path(
'defaults',
required=False,
opt_dirs=self.path.split(':'),
)
if not self.executable:
raise OSXDefaultsException("Unable to locate defaults executable.")
# When state is present, we require a parameter
if self.state == "present" and self.value is None:
raise OSXDefaultsException("Missing value parameter")
# Ensure the value is the correct type
self.value = self._convert_type(self.type, self.value)
# /init --------------------------------------------------------------- }}}
# tools --------------------------------------------------------------- {{{
""" Converts value to given type """
def _convert_type(self, type, value):
if type == "string":
return str(value)
elif type in ["bool", "boolean"]:
if isinstance(value, basestring):
value = value.lower()
if value in [True, 1, "true", "1", "yes"]:
return True
elif value in [False, 0, "false", "0", "no"]:
return False
raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
elif type == "date":
try:
return datetime.datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
except ValueError:
raise OSXDefaultsException(
"Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
)
elif type in ["int", "integer"]:
if not str(value).isdigit():
raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
return int(value)
elif type == "float":
try:
value = float(value)
except ValueError:
raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
return value
elif type == "array":
if not isinstance(value, list):
raise OSXDefaultsException("Invalid value. Expected value to be an array")
return value
raise OSXDefaultsException('Type is not supported: {0}'.format(type))
""" Returns a normalized list of commandline arguments based on the "host" attribute """
def _host_args(self):
if self.host is None:
return []
elif self.host == 'currentHost':
return ['-currentHost']
else:
return ['-host', self.host]
""" Returns a list containing the "defaults" executable and any common base arguments """
def _base_command(self):
return [self.executable] + self._host_args()
""" Converts array output from defaults to an list """
@staticmethod
def _convert_defaults_str_to_list(value):
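        # `defaults read` prints an array in a parenthesized, comma-separated
        # form, for example:
        #     (
        #         "en",
        #         "nl"
        #     )
        # The steps below reduce that output to a plain list like ['en', 'nl'].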
# Split output of defaults. Every line contains a value
value = value.splitlines()
# Remove first and last item, those are not actual values
value.pop(0)
value.pop(-1)
# Remove extra spaces and comma (,) at the end of values
value = [re.sub(',$', '', x.strip(' ')) for x in value]
return value
# /tools -------------------------------------------------------------- }}}
# commands ------------------------------------------------------------ {{{
""" Reads value of this domain & key from defaults """
def read(self):
# First try to find out the type
rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
# If RC is 1, the key does not exists
if rc == 1:
return None
        # If the RC is not 0, something went wrong while reading the key type
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key type from defaults: " + out)
# Ok, lets parse the type from output
type = out.strip().replace('Type is ', '')
# Now get the current value
rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
# Strip output
out = out.strip()
        # A non-zero RC at this point is unexpected
if rc != 0:
raise OSXDefaultsException("An error occurred while reading key value from defaults: " + out)
# Convert string to list when type is array
if type == "array":
out = self._convert_defaults_str_to_list(out)
# Store the current_value
self.current_value = self._convert_type(type, out)
""" Writes value to this domain & key to defaults """
def write(self):
        # We need to convert some values so the defaults command line understands them
if isinstance(self.value, bool):
if self.value:
value = "TRUE"
else:
value = "FALSE"
elif isinstance(self.value, (int, float)):
value = str(self.value)
elif self.array_add and self.current_value is not None:
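            # Write only the elements not already present, so repeated array-add runs stay idempotent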
value = list(set(self.value) - set(self.current_value))
elif isinstance(self.value, datetime.datetime):
value = self.value.strftime('%Y-%m-%d %H:%M:%S')
else:
value = self.value
# When the type is array and array_add is enabled, morph the type :)
if self.type == "array" and self.array_add:
self.type = "array-add"
# All values should be a list, for easy passing it to the command
if not isinstance(value, list):
value = [value]
rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value)
if rc != 0:
raise OSXDefaultsException('An error occurred while writing value to defaults: ' + out)
""" Deletes defaults key from domain """
def delete(self):
rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
if rc != 0:
raise OSXDefaultsException("An error occurred while deleting key from defaults: " + out)
# /commands ----------------------------------------------------------- }}}
# run ----------------------------------------------------------------- {{{
""" Does the magic! :) """
def run(self):
# Get the current value from defaults
self.read()
# Handle absent state
if self.state == "absent":
if self.current_value is None:
return False
if self.module.check_mode:
return True
self.delete()
return True
        # Check for a type mismatch between the given type and the type stored in defaults
value_type = type(self.value)
if self.current_value is not None and not isinstance(self.current_value, value_type):
raise OSXDefaultsException("Type mismatch. Type in defaults: " + type(self.current_value).__name__)
        # Current value matches the given value. Nothing needs to be done. Arrays need extra care
if self.type == "array" and self.current_value is not None and not self.array_add and \
set(self.current_value) == set(self.value):
return False
elif self.type == "array" and self.current_value is not None and self.array_add and \
len(list(set(self.value) - set(self.current_value))) == 0:
return False
elif self.current_value == self.value:
return False
if self.module.check_mode:
return True
# Change/Create/Set given key/value for domain in defaults
self.write()
return True
# /run ---------------------------------------------------------------- }}}
# /class MacDefaults ------------------------------------------------------ }}}
# main -------------------------------------------------------------------- {{{
def main():
module = AnsibleModule(
argument_spec=dict(
domain=dict(
default="NSGlobalDomain",
required=False,
),
host=dict(
default=None,
required=False,
),
key=dict(
default=None,
),
type=dict(
default="string",
required=False,
choices=[
"array",
"bool",
"boolean",
"date",
"float",
"int",
"integer",
"string",
],
),
array_add=dict(
default=False,
required=False,
type='bool',
),
value=dict(
default=None,
required=False,
type='raw'
),
state=dict(
default="present",
required=False,
choices=[
"absent", "present"
],
),
path=dict(
default="/usr/bin:/usr/local/bin",
required=False,
)
),
supports_check_mode=True,
)
domain = module.params['domain']
host = module.params['host']
key = module.params['key']
type = module.params['type']
array_add = module.params['array_add']
value = module.params['value']
state = module.params['state']
path = module.params['path']
try:
defaults = OSXDefaults(module=module, domain=domain, host=host, key=key, type=type,
array_add=array_add, value=value, state=state, path=path)
changed = defaults.run()
module.exit_json(changed=changed)
except OSXDefaultsException:
e = get_exception()
module.fail_json(msg=e.message)
# /main ------------------------------------------------------------------- }}}
if __name__ == '__main__':
main()
| gpl-3.0 |
sam17/room-of-requirement | alexa_skill/alexa_dumbledore_skill.py | 1 | 3214 | import logging
import json
from flask_ask import Ask,request,session, question, statement
from flask import Flask
import requests
import datetime
SERVER_IP = "http://ec2-52-221-204-189.ap-southeast-1.compute.amazonaws.com:3000/"
THIS = "Saturn"
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
@ask.launch
def launch():
    speech_text = 'Hi, I am Dumbledore. I can tell you everything about Rooms at App Dynamics'
    return question(speech_text).reprompt(speech_text).simple_card('DumbledoreResponse', speech_text)
@ask.intent('BookingIntent', mapping={'room': 'ROOM', 'fromTime': 'FROMTIME', 'toTime': 'TOTIME', 'team': 'TEAM', 'date': 'DATE'}, default={'date': datetime.datetime.now().strftime("%Y-%m-%d"), 'team': 'Platform'})
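# Note: datetime.now() inside the decorator's `default` mapping is evaluated once
# at import time, so the fallback date is the day the server started, not the day
# of the request.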
def book(room, fromTime, toTime, team, date):
if room == 'this':
room = THIS
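    # Build timestamps in the ISO-8601-style format the node server appears to
    # expect, e.g. "2017-05-01T14:00".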
startTime = date + "T" + str(fromTime)
endTime = date + "T" + str(toTime)
resp = requests.post(SERVER_IP+'listAvailableRooms', json={"startTime": startTime, "endTime": endTime})
    if resp.status_code != 200:
        return statement("Node Server Error, please check the node log")
available_rooms = json.loads(resp.text)
    if room in available_rooms:
resp = requests.post(SERVER_IP+'bookRoom', json={"organizer": team, "invitees" : "", "room": room, "startTime": startTime , "endTime": endTime })
        if resp.status_code != 200:
            return statement("Node Server Error, please check the node log")
speech_text = "Booking done for " + room + " by " + str(team) + " on " + date + " at " + fromTime
return statement(speech_text).simple_card('DumbledoreResponse', speech_text)
else:
speech_text = "Sorry, Room is already booked."
return statement(speech_text).simple_card('DumbledoreResponse', speech_text)
speech_text = "Sorry, I did not get all information"
return statement(speech_text).simple_card('DumbledoreResponse', speech_text)
@ask.intent('EmptyIntent', mapping={'fromTime': 'FROMTIME', 'toTime': 'TOTIME', 'date': 'DATE'}, default={'date': datetime.datetime.now().strftime("%Y-%m-%d")})
def findEmtpy(fromTime, toTime, date):
startTime = date + "T" + str(fromTime)
endTime = date + "T" + str(toTime)
print startTime, endTime
resp = requests.post(SERVER_IP+'listAvailableRooms', json={"startTime": startTime, "endTime": endTime})
    if resp.status_code != 200:
        return statement("Node Server Error, please check the node log")
available_rooms = json.loads(resp.text)
print available_rooms
speech_text = "Available Rooms are " + ", ".join([r.encode('utf-8') for r in available_rooms])
return statement(speech_text).simple_card('DumbledoreResponse', speech_text)
@ask.intent('AMAZON.HelpIntent')
def help():
    speech_text = 'For now, you can ask me to book a room or to find empty rooms'
return question(speech_text).reprompt(speech_text).simple_card('DumbledoreResponse', speech_text)
@ask.session_ended
def session_ended():
return "", 200
if __name__ == '__main__':
app.run(debug=True)
| mit |
alrusdi/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/views/tests/debug.py | 50 | 6467 | import inspect
import sys
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.template import TemplateSyntaxError
from django.views.debug import ExceptionReporter
from regressiontests.views import BrokenException, except_args
class DebugViewTests(TestCase):
def setUp(self):
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.old_template_debug = settings.TEMPLATE_DEBUG
settings.TEMPLATE_DEBUG = True
def tearDown(self):
settings.DEBUG = self.old_debug
settings.TEMPLATE_DEBUG = self.old_template_debug
def test_files(self):
response = self.client.get('/views/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', 'haha'),
}
response = self.client.post('/views/raises/', data)
self.assertTrue('file_data.txt' in response.content)
self.assertFalse('haha' in response.content)
def test_404(self):
response = self.client.get('/views/raises404/')
self.assertEqual(response.status_code, 404)
def test_view_exceptions(self):
for n in range(len(except_args)):
self.assertRaises(BrokenException, self.client.get,
reverse('view_exception', args=(n,)))
def test_template_exceptions(self):
for n in range(len(except_args)):
try:
self.client.get(reverse('template_exception', args=(n,)))
except TemplateSyntaxError, e:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertFalse(raising_loc.find('raise BrokenException') == -1,
"Failed to find 'raise BrokenException' in last frame of traceback, instead found: %s" %
raising_loc)
def test_template_loader_postmortem(self):
response = self.client.get(reverse('raises_template_does_not_exist'))
self.assertContains(response, 'templates/i_dont_exist.html</code> (File does not exist)</li>', status_code=500)
class ExceptionReporterTests(TestCase):
rf = RequestFactory()
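    # RequestFactory builds request objects directly, without running the full
    # request/response cycle of the Django test client.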
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertIn('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
| gpl-3.0 |
GcsSloop/PythonNote | PythonCode/Python入门/函数/定义可变参数.py | 1 | 1027 | #coding=utf-8
#author: sloop
'''
Task
Write an average() function that accepts a variable number of arguments.
'''
# Code
def average(*args):
    s = sum(args)*1.0  # sum
    l = len(args)  # count
    return 0.0 if l==0 else s/l  # average
print average()
print average(1, 2)
print average(1, 2, 2, 3, 4)
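# Extra illustration (not part of the original note): an existing sequence can
# be expanded into variable arguments with *, e.g.:
nums = [1, 2, 2, 3, 4]
print average(*nums)  # equivalent to average(1, 2, 2, 3, 4) -> 2.4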
'''
def average(*args):
return 0.0 if len(args)==0 else sum(args)*1.0/len(args)
'''
'''
Defining variable arguments
If you want a function to accept any number of arguments, you can define a variable parameter:
def fn(*args):
print args
A variable parameter's name is prefixed with a * sign; we can pass zero, one, or more arguments to it:
>>> fn()
()
>>> fn('a')
('a',)
>>> fn('a', 'b')
('a', 'b')
>>> fn('a', 'b', 'c')
('a', 'b', 'c')
Variable parameters are not mysterious: the Python interpreter packs the arguments passed in into a tuple and hands it to the variable parameter, so inside the function you can simply treat the variable args as a tuple.
The point of defining a variable parameter is also to simplify calls. Suppose we want to compute the average of any number of values; we can define a variable parameter:
def average(*args):
...
Then, when calling it, you can write:
>>> average()
0
>>> average(1, 2)
1.5
>>> average(1, 2, 2, 3, 4)
2.4
''' | apache-2.0 |
Bostonncity/omaha | installers/tagged_installer.py | 65 | 3236 | #!/usr/bin/python2.4
# Copyright 2009-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
import os
import re
from installers import tag_meta_installers
def TagOneBundle(env, bundle, untagged_binary_path, output_dir):
tag_str = tag_meta_installers.BuildTagStringForBundle(bundle)
# Need to find relative path to output file under source dir, to allow
# it to be redirected under the output directory.
indx = bundle.output_file_name.find('installers')
relative_filepath = bundle.output_file_name[indx+len('installers')+1:]
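  # Worked example (hypothetical path): for an output_file_name of
  # 'C:/omaha/installers/en/GoogleUpdateSetup.exe', the slice above keeps the
  # part after 'installers/', so relative_filepath == 'en/GoogleUpdateSetup.exe'
  # and the tagged copy lands at '<output_dir>/en/GoogleUpdateSetup.exe'.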
tag_exe = '$TESTS_DIR/ApplyTag.exe'
tag_output = env.Command(
target='%s/%s' % (output_dir, relative_filepath),
source=untagged_binary_path,
action='%s $SOURCES $TARGET %s' % (
env.File(tag_exe).abspath, tag_str)
)
# Add extra (hidden) dependency plus a dependency on the tag executable.
env.Depends(tag_output, [bundle.installers_txt_filename, tag_exe])
return tag_output
def _ReadAllBundleInstallerFiles(installers_txt_files_path):
"""Enumerates all the .*_installers.txt files in the installers_txt_files_path
directory, and creates bundles corresponding to the info in each line in
the *_installers.txt file.
Returns:
Returns a dictionary of Bundles with key=lang.
"""
bundles = {}
files = os.listdir(installers_txt_files_path)
for file in files:
regex = re.compile('^(.*)_installers.txt$')
if not regex.match(file):
continue
installer_file = os.path.join(installers_txt_files_path, file)
# Read in the installer file.
read_bundles = tag_meta_installers.ReadBundleInstallerFile(installer_file)
for (key, bundle_list) in read_bundles.items():
if not bundle_list or not key:
continue
if not bundles.has_key(key):
bundles[key] = bundle_list
else:
new_bundles_list = bundles[key] + bundle_list
bundles[key] = new_bundles_list
return bundles
def CreateTaggedInstallers(env, installers_txt_files_path, product_name,
prefix = ''):
"""For each application with an installers.txt file in installer_files_path,
create tagged metainstaller(s).
"""
bundles = _ReadAllBundleInstallerFiles(installers_txt_files_path)
untagged_binary = '%s%sSetup.exe' % (prefix, product_name)
tag_meta_installers.SetOutputFileNames(untagged_binary, bundles, '')
for bundles_lang in bundles.itervalues():
for bundle in bundles_lang:
TagOneBundle(
env=env,
bundle=bundle,
untagged_binary_path='$STAGING_DIR/%s' % (untagged_binary),
output_dir='$TARGET_ROOT/Tagged_Installers',
)
| apache-2.0 |
batxes/4Cin | SHH_INV_models/SHH_INV_models_final_output_0.2_-0.1_10000/mtx1_models/SHH_INV_models752.py | 4 | 17573 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((-207.536, 2706.03, 7917.36), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((1684.38, 2907.3, 7418.01), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((2671.05, 4365.58, 7351.74), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2186.77, 5810.76, 9108.36), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((3329.61, 5579.94, 9302.3), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((4653.13, 3971.93, 8239.47), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((5619.79, 1960.64, 10117.4), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((5968.15, 1845.22, 11491.8), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((5530.3, 1584.13, 6937.88), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((5001.09, 801.888, 6351.18), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((3569.13, 1628.14, 5590.41), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((1740.6, 1967.36, 4789.98), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((2805.79, 2717.89, 3907.71), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((3099.01, 4521.59, 4161.69), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((1444.17, 5013.24, 4746.8), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((-1046.68, 5460.68, 4552.49), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((301.217, 4203.71, 5195.8), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((595.756, 2816.11, 5818.69), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((108.534, 4103.83, 5643.7), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((624.405, 4209.43, 4169.54), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((2411.96, 3896.98, 4144.71), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((2000.53, 4239.23, 4159.93), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((1742.34, 2927.09, 4069.99), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((1663.14, 2121.01, 5106.55), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((1817.67, 3538.84, 5854.66), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((2084.82, 4224.3, 4706.3), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((812.209, 3668.45, 4360.17), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((2063.61, 3437.53, 3453.95), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((2309.34, 4748.94, 3645.29), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2730.54, 5664.55, 3149.44), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2933.81, 4737.39, 4435.11), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((3808.47, 4674.35, 5680.5), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((3438.26, 3579.11, 4630.85), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((2043.07, 3340.69, 4134.85), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((1787.54, 2969.07, 3380.98), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((1645.76, 1695.63, 2744.43), (0.7, 0.7, 0.7), 697.612)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((4088.18, 3123.64, 3942.17), (0.7, 0.7, 0.7), 799.808)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((5851.9, 2237.99, 5250.2), (0.7, 0.7, 0.7), 1132.58)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((6254.9, 2864.86, 4784.87), (0.7, 0.7, 0.7), 1011.94)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((7842, 3352.29, 4988.8), (0.7, 0.7, 0.7), 782.592)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((8442.78, 1706.41, 5330.33), (0.7, 0.7, 0.7), 856.575)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((8712.35, 721.168, 5440.16), (1, 0.7, 0), 706.579)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((8226.46, 1717.29, 5605.1), (0.7, 0.7, 0.7), 1015.96)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((7544.8, 4487.46, 5711.28), (0.7, 0.7, 0.7), 1205.72)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((7695.9, 6446.19, 4957.51), (0.7, 0.7, 0.7), 841.939)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((7256.93, 6039.91, 6381.91), (1, 0.7, 0), 806.999)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((7497.79, 6638.54, 6421.06), (0.7, 0.7, 0.7), 958.856)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((6413.48, 8122.47, 6844.33), (0.7, 0.7, 0.7), 952.892)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((6629.36, 8875.54, 6493.19), (0.7, 0.7, 0.7), 809.284)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((7435.87, 8371.95, 6510.56), (0.7, 0.7, 0.7), 709.159)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((5989.03, 9002.13, 5702.32), (0.7, 0.7, 0.7), 859.832)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((4391.34, 8324.38, 5529.45), (0.7, 0.7, 0.7), 800.866)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((5378.06, 8405.9, 3980.26), (0.7, 0.7, 0.7), 949.508)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((6200.24, 6878.48, 3151.95), (0.7, 0.7, 0.7), 891.98)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((4450.93, 6621.99, 4060.48), (0.7, 0.7, 0.7), 890.034)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((5338, 7988.48, 4527.63), (0.7, 0.7, 0.7), 804.165)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((5367.09, 8253.16, 2873.56), (0.7, 0.7, 0.7), 826.796)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((5479.75, 8879.3, 1980.47), (0.7, 0.7, 0.7), 1085.8)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((5947.12, 6583.23, 2908.12), (0.7, 0.7, 0.7), 906.997)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((7349.08, 7707.97, 3099.42), (0.7, 0.7, 0.7), 708.694)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((8479.76, 8059.54, 4193.14), (0.7, 0.7, 0.7), 780.223)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((9005.78, 8746.7, 5528.34), (0.7, 0.7, 0.7), 757.424)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((9534.56, 9031.12, 7035.6), (0.7, 0.7, 0.7), 817.574)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((8290.42, 9809.39, 7604.57), (0.7, 0.7, 0.7), 782.423)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((6941.69, 8739.69, 7554.86), (0.7, 0.7, 0.7), 906.404)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((7010.92, 8963.91, 5742.91), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((6550.28, 9238.93, 6352.59), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((6205.33, 8570.56, 8049.29), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((7021.52, 9898.13, 6690.05), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((6856.63, 8957.73, 5793.29), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((6189.93, 9564.67, 6718.62), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 |
WielderOfMjoelnir/pypeira | pypeira/core/time.py | 1 | 1497 | # This might no have any use anymore, as the HDU objects is the new thing.
# Possibly changed it a bit to a time converter instead of getter I suppose.
def hdu_get_time(hdu, time_format='bmjd'):
"""
    Will be used as a key function for the list.sort() or sorted() functions.
    For example,
        hdus.sort(key=hdu_get_time)
    where hdus is a list of HDU objects, will call hdu_get_time() on each
    element in the list and then sort the elements according to the value
    returned from the key function, in this case hdu_get_time().
Parameters
----------
hdu: HDU object
The HDU object which is an element in the list that is to be sorted.
time_format: str, optional
The time format you want to sort by, even though it should not matter.
Returns
-------
float
It's the header entry BMJD_OBS (Barycentric Julian Date of observation), which
will then be used as the comparison attribute of each element in the list to
sorted. If a different time format is specified using the 'time_format' keyword,
then the returned value will be the corresponding header value.
"""
format_to_kwrd = {
'bmjd': 'BMJD_OBS',
        'hmjd': 'HMJD_OBS',
        'mjd': 'MJD_OBS',
        'utc': 'UTCS_OBS',
'date': 'DATE_OBS',
'dce': 'ET_OBS'
}
if format_to_kwrd.get(time_format):
return hdu.hdr[format_to_kwrd.get(time_format)]
else:
return hdu.timestamp
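# Usage sketch (assumes `hdus` is a list of HDU objects as described above):
#     hdus.sort(key=hdu_get_time)
#     by_bmjd = sorted(hdus, key=lambda h: hdu_get_time(h, 'bmjd'))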
| mit |
Ritiek/Spotify-Downloader | spotdl/encode/encoders/ffmpeg.py | 1 | 5171 | import subprocess
import os
from spotdl.encode import EncoderBase
from spotdl.encode.exceptions import EncoderNotFoundError
from spotdl.encode.exceptions import FFmpegNotFoundError
import logging
logger = logging.getLogger(__name__)
# Key: from format
# Subkey: to format
RULES = {
"m4a": {
"mp3": "-codec:v copy -codec:a libmp3lame",
"opus": "-codec:a libopus",
"m4a": "-acodec copy",
"flac": "-codec:a flac",
"ogg": "-codec:a libvorbis -q:a 5",
},
"opus": {
"mp3": "-codec:a libmp3lame",
"m4a": "-cutoff 20000 -codec:a aac",
"flac": "-codec:a flac",
"ogg": "-codec:a libvorbis -q:a 5",
"opus": "-acodec copy",
},
}
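# Example lookup (illustrative): RULES["m4a"]["mp3"] yields the ffmpeg flags
# "-codec:v copy -codec:a libmp3lame" used when re-encoding an .m4a into .mp3.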
class EncoderFFmpeg(EncoderBase):
"""
A class for encoding media files using FFmpeg.
Parameters
----------
encoder_path: `str`
Path to FFmpeg.
must_exist: `bool`
Error out immediately if the encoder isn't found in
``encoder_path``.
Examples
--------
+ Re-encode an OPUS stream from STDIN to an MP3:
>>> import os
>>> input_path = "audio.opus"
>>> target_path = "audio.mp3"
>>> input_path_size = os.path.getsize(input_path)
>>>
>>> from spotdl.encode.encoders import EncoderFFmpeg
>>> ffmpeg = EncoderFFmpeg()
>>> process = ffmpeg.re_encode_from_stdin(
... input_encoding="opus",
... target_path=target_path
... )
>>>
>>> chunk_size = 4096
>>> total_chunks = (input_path_size // chunk_size) + 1
>>>
>>> with open(input_path, "rb") as fin:
... for chunk_number in range(1, total_chunks+1):
... chunk = fin.read(chunk_size)
... process.stdin.write(chunk)
... print("chunks encoded: {}/{}".format(
... chunk_number,
... total_chunks,
... ))
>>>
>>> process.stdin.close()
>>> process.wait()
"""
def __init__(self, encoder_path="ffmpeg", must_exist=True):
_loglevel = "-hide_banner -nostats -v warning"
_additional_arguments = ["-b:a", "192k", "-vn"]
try:
super().__init__(encoder_path, must_exist, _loglevel, _additional_arguments)
except EncoderNotFoundError as e:
raise FFmpegNotFoundError(e.args[0])
self._rules = RULES
def set_trim_silence(self):
self.set_argument("-af silenceremove=start_periods=1")
def get_encoding(self, path):
return super().get_encoding(path)
def _generate_encoding_arguments(self, input_encoding, target_encoding):
initial_arguments = self._rules.get(input_encoding)
if initial_arguments is None:
raise TypeError(
'The input format ("{}") is not supported.'.format(
input_encoding,
))
arguments = initial_arguments.get(target_encoding)
if arguments is None:
raise TypeError(
'The output format ("{}") is not supported.'.format(
target_encoding,
))
return arguments
def set_debuglog(self):
self._loglevel = "-loglevel debug"
def _generate_encode_command(self, input_path, target_path,
input_encoding=None, target_encoding=None):
if input_encoding is None:
input_encoding = self.get_encoding(input_path)
if target_encoding is None:
target_encoding = self.get_encoding(target_path)
arguments = self._generate_encoding_arguments(
input_encoding,
target_encoding
)
command = [self.encoder_path] \
+ ["-y", "-nostdin"] \
+ self._loglevel.split() \
+ ["-i", input_path] \
+ arguments.split() \
+ self._additional_arguments \
+ ["-f", self.target_format_from_encoding(target_encoding)] \
+ [target_path]
return command
def re_encode(self, input_path, target_path, target_encoding=None, delete_original=False):
encode_command = self._generate_encode_command(
input_path,
target_path,
target_encoding=target_encoding
)
logger.debug("Calling FFmpeg with:\n{command}".format(
command=encode_command,
))
process = subprocess.Popen(encode_command)
process.wait()
encode_successful = process.returncode == 0
if encode_successful and delete_original:
os.remove(input_path)
return process
def re_encode_from_stdin(self, input_encoding, target_path, target_encoding=None):
encode_command = self._generate_encode_command(
"-",
target_path,
input_encoding=input_encoding,
target_encoding=target_encoding,
)
logger.debug("Calling FFmpeg with:\n{command}".format(
command=encode_command,
))
process = subprocess.Popen(encode_command, stdin=subprocess.PIPE)
return process
| mit |
ClusterLabs/pcs | pcs_test/tools/command_env/config_http_files.py | 3 | 4688 | import base64
import json
from pcs_test.tools.command_env.mock_node_communicator import (
place_multinode_call,
)
class FilesShortcuts:
def __init__(self, calls):
self.__calls = calls
def put_files(
self,
node_labels=None,
pcmk_authkey=None,
corosync_authkey=None,
corosync_conf=None,
pcs_disaster_recovery_conf=None,
pcs_settings_conf=None,
communication_list=None,
name="http.files.put_files",
):
# pylint: disable=too-many-arguments
"""
Create a call for the files distribution to the nodes.
node_labels list -- create success responses from these nodes
pcmk_authkey bytes -- content of pacemaker authkey file
corosync_authkey bytes -- content of corosync authkey file
corosync_conf string -- content of corosync.conf
pcs_disaster_recovery_conf string -- content of pcs DR config
pcs_settings_conf string -- content of pcs_settings.conf
communication_list list -- create custom responses
name string -- the key of this call
"""
input_data = {}
output_data = {}
written_output_dict = dict(
code="written",
message="",
)
if pcmk_authkey:
file_id = "pacemaker_remote authkey"
input_data[file_id] = dict(
data=base64.b64encode(pcmk_authkey).decode("utf-8"),
type="pcmk_remote_authkey",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
if corosync_authkey:
file_id = "corosync authkey"
input_data[file_id] = dict(
data=base64.b64encode(corosync_authkey).decode("utf-8"),
type="corosync_authkey",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
if corosync_conf:
file_id = "corosync.conf"
input_data[file_id] = dict(
data=corosync_conf,
type="corosync_conf",
)
output_data[file_id] = written_output_dict
if pcs_disaster_recovery_conf:
file_id = "disaster-recovery config"
input_data[file_id] = dict(
data=base64.b64encode(pcs_disaster_recovery_conf).decode(
"utf-8"
),
type="pcs_disaster_recovery_conf",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
if pcs_settings_conf:
file_id = "pcs_settings.conf"
input_data[file_id] = dict(
data=pcs_settings_conf,
type="pcs_settings_conf",
rewrite_existing=True,
)
output_data[file_id] = written_output_dict
place_multinode_call(
self.__calls,
name,
node_labels,
communication_list,
action="remote/put_file",
param_list=[("data_json", json.dumps(input_data))],
output=json.dumps(dict(files=output_data)),
)
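    # Usage sketch in a test (hypothetical wiring; attribute path assumed):
    #   env_assist.config.http.files.put_files(
    #       node_labels=["node-1", "node-2"],
    #       corosync_authkey=b"secret-key-bytes",
    #   )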
def remove_files(
self,
node_labels=None,
pcsd_settings=False,
pcs_disaster_recovery_conf=False,
communication_list=None,
name="http.files.remove_files",
):
"""
Create a call for removing the files on the nodes.
node_labels list -- create success responses from these nodes
pcsd_settings bool -- if True, remove file pcsd_settings
pcs_disaster_recovery_conf bool -- if True, remove pcs DR config
communication_list list -- create custom responses
name string -- the key of this call
"""
input_data = {}
output_data = {}
if pcsd_settings:
file_id = "pcsd settings"
input_data[file_id] = dict(type="pcsd_settings")
output_data[file_id] = dict(
code="deleted",
message="",
)
if pcs_disaster_recovery_conf:
file_id = "pcs disaster-recovery config"
input_data[file_id] = dict(type="pcs_disaster_recovery_conf")
output_data[file_id] = dict(
code="deleted",
message="",
)
place_multinode_call(
self.__calls,
name,
node_labels,
communication_list,
action="remote/remove_file",
param_list=[("data_json", json.dumps(input_data))],
output=json.dumps(dict(files=output_data)),
)
| gpl-2.0 |
mdavid/pledgeservice | testlib/setuptools/command/test.py | 113 | 6526 | import unittest
from unittest import TestLoader
from setuptools import Command
from distutils.errors import DistutilsOptionError
import sys
from pkg_resources import (resource_listdir, resource_exists,
normalize_path, working_set, _namespace_packages, add_activation_listener,
require, EntryPoint)
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
if module.__name__ != 'setuptools.tests.doctest': # ugh
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__+'.'+file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
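# Wiring note: this loader is what a project opts into via
# setup(..., test_loader="setuptools.command.test:ScanningLoader"); it is also
# the default applied in test.finalize_options() below.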
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=','m', "Run 'test_suite' in specified module"),
('test-suite=','s',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0,'--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
def with_project_on_sys_path(self, func):
with_2to3 = (
sys.version_info >= (3,)
and getattr(self.distribution, 'use_2to3', False)
)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
module = self.test_args[-1].split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
unittest_main(
None, None, [unittest.__file__]+self.test_args,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
)
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as a as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.load(require=False)()
| agpl-3.0 |
hfeeki/transifex | transifex/projects/search_indexes.py | 1 | 1115 | import datetime
from haystack.indexes import *
from haystack import site
from transifex.projects.models import Project
class ProjectIndex(RealTimeSearchIndex):
text = CharField(document=True, use_template=True)
slug = CharField(model_attr='slug', null=False)
name = CharField(model_attr='name', null=False, boost=1.125)
description = CharField(model_attr='description', null=True)
# django-haystack-1.2 needs it along with the custom prepare method
suggestions = CharField()
def prepare(self, obj):
prepared_data = super(ProjectIndex, self).prepare(obj)
prepared_data['suggestions'] = prepared_data['text']
return prepared_data
def index_queryset(self):
"""Used when the entire index for model is updated."""
# Do not index private projects
return Project.objects.exclude(private=True).filter(
modified__lte=datetime.datetime.now())
def get_updated_field(self):
"""Project mode field used to identify new/modified object to index."""
return 'modified'
site.register(Project, ProjectIndex) | gpl-2.0 |
MiltosD/CEF-ELRC | lib/python2.7/site-packages/django/contrib/auth/tests/models.py | 318 | 1493 | from django.conf import settings
from django.test import TestCase
from django.contrib.auth.models import User, SiteProfileNotAvailable
class ProfileTestCase(TestCase):
fixtures = ['authtestdata.json']
def setUp(self):
"""Backs up the AUTH_PROFILE_MODULE"""
self.old_AUTH_PROFILE_MODULE = getattr(settings,
'AUTH_PROFILE_MODULE', None)
def tearDown(self):
"""Restores the AUTH_PROFILE_MODULE -- if it was not set it is deleted,
otherwise the old value is restored"""
if self.old_AUTH_PROFILE_MODULE is None and \
hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
if self.old_AUTH_PROFILE_MODULE is not None:
settings.AUTH_PROFILE_MODULE = self.old_AUTH_PROFILE_MODULE
def test_site_profile_not_available(self):
# calling get_profile without AUTH_PROFILE_MODULE set
if hasattr(settings, 'AUTH_PROFILE_MODULE'):
del settings.AUTH_PROFILE_MODULE
user = User.objects.get(username='testclient')
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# Bad syntax in AUTH_PROFILE_MODULE:
settings.AUTH_PROFILE_MODULE = 'foobar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
# module that doesn't exist
settings.AUTH_PROFILE_MODULE = 'foo.bar'
self.assertRaises(SiteProfileNotAvailable, user.get_profile)
| bsd-3-clause |
GheRivero/ansible | lib/ansible/modules/storage/netapp/netapp_e_hostgroup.py | 45 | 13964 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_hostgroup
version_added: "2.2"
short_description: Manage NetApp Storage Array Host Groups
author: Kevin Hulquest (@hulquest)
description:
- Create, update or destroy host groups on a NetApp E-Series storage array.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
required: true
description:
- The ID of the array to manage (as configured on the web services proxy).
state:
required: true
description:
- Whether the specified host group should exist or not.
choices: ['present', 'absent']
name:
required: false
description:
- The name of the host group to manage. Either this or C(id_num) must be supplied.
new_name:
required: false
description:
- specify this when you need to update the name of a host group
id:
required: false
description:
- The id number of the host group to manage. Either this or C(name) must be supplied.
hosts:
required: false
description:
- a list of host names/labels to add to the group
'''
EXAMPLES = '''
- name: Configure Hostgroup
netapp_e_hostgroup:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
state: present
'''
RETURN = '''
clusterRef:
description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
returned: always except when state is absent
type: string
sample: "3233343536373839303132333100000000000000"
confirmLUNMappingCreation:
description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
will alter the volume access rights of other clusters, in addition to this one.
returned: always
type: boolean
sample: false
hosts:
description: A list of the hosts that are part of the host group after all operations.
returned: always except when state is absent
type: list
sample: ["HostA","HostB"]
id:
description: The id number of the hostgroup
returned: always except when state is absent
type: string
sample: "3233343536373839303132333100000000000000"
isSAControlled:
description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
returned: always except when state is absent
type: boolean
sample: false
label:
description: The user-assigned, descriptive label string for the cluster.
returned: always
type: string
sample: "MyHostGroup"
name:
description: same as label
returned: always except when state is absent
type: string
sample: "MyHostGroup"
protectionInformationCapableAccessMethod:
description: This field is true if the host has a PI capable access method.
returned: always except when state is absent
type: boolean
sample: true
'''
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json"
}
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError as e:
r = e.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def group_exists(module, id_type, ident, ssid, api_url, user, pwd):
rc, data = get_hostgroups(module, ssid, api_url, user, pwd)
for group in data:
if group[id_type] == ident:
return True, data
else:
continue
return False, data
def get_hostgroups(module, ssid, api_url, user, pwd):
groups = "storage-systems/%s/host-groups" % ssid
url = api_url + groups
try:
rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
return rc, data
except HTTPError as e:
module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]." % (ssid, to_native(e)))
def get_hostref(module, ssid, name, api_url, user, pwd):
all_hosts = 'storage-systems/%s/hosts' % ssid
url = api_url + all_hosts
try:
rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]." % (ssid, to_native(e)))
for host in data:
if host['name'] == name:
return host['hostRef']
else:
continue
module.fail_json(msg="No host with the name %s could be found" % name)
def create_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None):
groups = "storage-systems/%s/host-groups" % ssid
url = api_url + groups
hostrefs = []
if hosts:
for host in hosts:
href = get_hostref(module, ssid, host, api_url, user, pwd)
hostrefs.append(href)
post_data = json.dumps(dict(name=name, hosts=hostrefs))
try:
rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to create host group. Id [%s]. Error [%s]." % (ssid, to_native(e)))
return rc, data
def update_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None, new_name=None):
gid = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
groups = "storage-systems/%s/host-groups/%s" % (ssid, gid)
url = api_url + groups
hostrefs = []
if hosts:
for host in hosts:
href = get_hostref(module, ssid, host, api_url, user, pwd)
hostrefs.append(href)
if new_name:
post_data = json.dumps(dict(name=new_name, hosts=hostrefs))
else:
post_data = json.dumps(dict(hosts=hostrefs))
try:
rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to update host group. Group [%s]. Id [%s]. Error [%s]." % (gid, ssid,
to_native(e)))
return rc, data
def delete_hostgroup(module, ssid, group_id, api_url, user, pwd):
groups = "storage-systems/%s/host-groups/%s" % (ssid, group_id)
url = api_url + groups
# TODO: Loop through hosts, do mapping to href, make new list to pass to data
try:
rc, data = request(url, method='DELETE', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(msg="Failed to delete host group. Group [%s]. Id [%s]. Error [%s]." % (group_id, ssid, to_native(e)))
return rc, data
def get_hostgroup_id(module, ssid, name, api_url, user, pwd):
all_groups = 'storage-systems/%s/host-groups' % ssid
url = api_url + all_groups
rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
for hg in data:
if hg['name'] == name:
return hg['id']
else:
continue
module.fail_json(msg="A hostgroup with the name %s could not be found" % name)
def get_hosts_in_group(module, ssid, group_name, api_url, user, pwd):
all_groups = 'storage-systems/%s/host-groups' % ssid
g_url = api_url + all_groups
try:
g_rc, g_data = request(g_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(
msg="Failed in first step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (group_name,
ssid,
to_native(e)))
all_hosts = 'storage-systems/%s/hosts' % ssid
h_url = api_url + all_hosts
try:
h_rc, h_data = request(h_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
except Exception as e:
module.fail_json(
msg="Failed in second step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (
group_name,
ssid,
to_native(e)))
hosts_in_group = []
for hg in g_data:
if hg['name'] == group_name:
clusterRef = hg['clusterRef']
for host in h_data:
if host['clusterRef'] == clusterRef:
hosts_in_group.append(host['name'])
return hosts_in_group
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False),
new_name=dict(required=False),
ssid=dict(required=True),
id=dict(required=False),
state=dict(required=True, choices=['present', 'absent']),
hosts=dict(required=False, type='list'),
api_url=dict(required=True),
api_username=dict(required=True),
validate_certs=dict(required=False, default=True),
api_password=dict(required=True, no_log=True)
),
supports_check_mode=False,
mutually_exclusive=[['name', 'id']],
required_one_of=[['name', 'id']]
)
name = module.params['name']
new_name = module.params['new_name']
ssid = module.params['ssid']
id_num = module.params['id']
state = module.params['state']
hosts = module.params['hosts']
user = module.params['api_username']
pwd = module.params['api_password']
api_url = module.params['api_url']
if not api_url.endswith('/'):
api_url += '/'
if name:
id_type = 'name'
id_key = name
elif id_num:
id_type = 'id'
id_key = id_num
exists, group_data = group_exists(module, id_type, id_key, ssid, api_url, user, pwd)
if state == 'present':
if not exists:
try:
rc, data = create_hostgroup(module, ssid, name, api_url, user, pwd, hosts)
except Exception as e:
module.fail_json(msg="Failed to create a host group. Id [%s]. Error [%s]." % (ssid, to_native(e)))
hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
module.exit_json(changed=True, hosts=hosts, **data)
else:
current_hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
if not current_hosts:
current_hosts = []
if not hosts:
hosts = []
if set(current_hosts) != set(hosts):
try:
rc, data = update_hostgroup(module, ssid, name, api_url, user, pwd, hosts, new_name)
except Exception as e:
module.fail_json(
msg="Failed to update host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, to_native(e)))
module.exit_json(changed=True, hosts=hosts, **data)
else:
for group in group_data:
if group['name'] == name:
module.exit_json(changed=False, hosts=current_hosts, **group)
elif state == 'absent':
if exists:
hg_id = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
try:
rc, data = delete_hostgroup(module, ssid, hg_id, api_url, user, pwd)
except Exception as e:
module.fail_json(
msg="Failed to delete host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, to_native(e)))
module.exit_json(changed=True, msg="Host Group deleted")
else:
module.exit_json(changed=False, msg="Host Group is already absent")
if __name__ == '__main__':
main()
| gpl-3.0 |
fxtentacle/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py | 125 | 5394 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from .attachment import Attachment
class Bug(object):
# FIXME: This class is kinda a hack for now. It exists so we have one
# place to hold bug logic, even if much of the code deals with
# dictionaries still.
def __init__(self, bug_dictionary, bugzilla):
self.bug_dictionary = bug_dictionary
self._bugzilla = bugzilla
def id(self):
return self.bug_dictionary["id"]
def title(self):
# FIXME: Do we need to HTML unescape the title?
return self.bug_dictionary["title"]
def reporter_email(self):
return self.bug_dictionary["reporter_email"]
def assigned_to_email(self):
return self.bug_dictionary["assigned_to_email"]
def cc_emails(self):
return self.bug_dictionary["cc_emails"]
# FIXME: This information should be stored in some sort of webkit_config.py instead of here.
unassigned_emails = frozenset([
"[email protected]",
"[email protected]",
])
def is_unassigned(self):
return self.assigned_to_email() in self.unassigned_emails
def status(self):
return self.bug_dictionary["bug_status"]
# Bugzilla has many status states we don't really use in WebKit:
# https://bugs.webkit.org/page.cgi?id=fields.html#status
_open_states = ["UNCONFIRMED", "NEW", "ASSIGNED", "REOPENED"]
_closed_states = ["RESOLVED", "VERIFIED", "CLOSED"]
def is_open(self):
return self.status() in self._open_states
def is_closed(self):
return not self.is_open()
def duplicate_of(self):
return self.bug_dictionary.get('dup_id', None)
# Rarely do we actually want obsolete attachments
def attachments(self, include_obsolete=False):
attachments = self.bug_dictionary["attachments"]
if not include_obsolete:
attachments = filter(lambda attachment:
not attachment["is_obsolete"], attachments)
return [Attachment(attachment, self) for attachment in attachments]
def patches(self, include_obsolete=False):
return [patch for patch in self.attachments(include_obsolete)
if patch.is_patch()]
def unreviewed_patches(self):
return [patch for patch in self.patches() if patch.review() == "?"]
def reviewed_patches(self, include_invalid=False):
patches = [patch for patch in self.patches() if patch.review() == "+"]
if include_invalid:
return patches
# Checking reviewer() ensures that it was both reviewed and has a valid
# reviewer.
return filter(lambda patch: patch.reviewer(), patches)
def commit_queued_patches(self, include_invalid=False):
patches = [patch for patch in self.patches()
if patch.commit_queue() == "+"]
if include_invalid:
return patches
# Checking committer() ensures that it was both commit-queue+'d and has
# a valid committer.
return filter(lambda patch: patch.committer(), patches)
def comments(self):
return self.bug_dictionary["comments"]
def is_in_comments(self, message):
for comment in self.comments():
if message in comment["text"]:
return True
return False
def commit_revision(self):
# Sort the comments in reverse order as we want the latest committed revision.
r = re.compile("Committed r(?P<svn_revision>\d+)")
for comment in sorted(self.comments(), reverse=True):
rev = r.search(comment['text'])
if rev:
return int(rev.group('svn_revision'))
return None
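    # Illustration (hypothetical comment text): a bug comment containing
    # "Committed r123456" makes commit_revision() return 123456; with no such
    # comment it returns None.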
| bsd-3-clause |
wireservice/agate | agate/csv_py3.py | 1 | 4555 | #!/usr/bin/env python
"""
This module contains the Python 3 replacement for :mod:`csv`.
"""
import csv
import six
from agate.exceptions import FieldSizeLimitError
POSSIBLE_DELIMITERS = [',', '\t', ';', ' ', ':', '|']
class Reader(six.Iterator):
"""
A wrapper around Python 3's builtin :func:`csv.reader`.
"""
def __init__(self, f, field_size_limit=None, line_numbers=False, header=True, **kwargs):
self.line_numbers = line_numbers
self.header = header
if field_size_limit:
csv.field_size_limit(field_size_limit)
self.reader = csv.reader(f, **kwargs)
def __iter__(self):
return self
def __next__(self):
try:
row = next(self.reader)
except csv.Error as e:
# Terrible way to test for this exception, but there is no subclass
if 'field larger than field limit' in str(e):
raise FieldSizeLimitError(csv.field_size_limit())
else:
raise e
        if self.line_numbers:
            if self.header and self.line_num == 1:
                row.insert(0, 'line_numbers')
            else:
                row.insert(0, str(self.line_num - 1 if self.header else self.line_num))
        return row
@property
def dialect(self):
return self.reader.dialect
@property
def line_num(self):
return self.reader.line_num
class Writer(object):
"""
A wrapper around Python 3's builtin :func:`csv.writer`.
"""
def __init__(self, f, line_numbers=False, **kwargs):
self.row_count = 0
self.line_numbers = line_numbers
if 'lineterminator' not in kwargs:
kwargs['lineterminator'] = '\n'
self.writer = csv.writer(f, **kwargs)
def _append_line_number(self, row):
if self.row_count == 0:
row.insert(0, 'line_number')
else:
row.insert(0, self.row_count)
self.row_count += 1
def writerow(self, row):
if self.line_numbers:
row = list(row)
self._append_line_number(row)
# Convert embedded Mac line endings to unix style line endings so they get quoted
row = [i.replace('\r', '\n') if isinstance(i, six.string_types) else i for i in row]
self.writer.writerow(row)
def writerows(self, rows):
for row in rows:
self.writerow(row)
class DictReader(csv.DictReader):
"""
A wrapper around Python 3's builtin :class:`csv.DictReader`.
"""
pass
class DictWriter(csv.DictWriter):
"""
A wrapper around Python 3's builtin :class:`csv.DictWriter`.
"""
def __init__(self, f, fieldnames, line_numbers=False, **kwargs):
self.row_count = 0
self.line_numbers = line_numbers
if 'lineterminator' not in kwargs:
kwargs['lineterminator'] = '\n'
if self.line_numbers:
fieldnames.insert(0, 'line_number')
csv.DictWriter.__init__(self, f, fieldnames, **kwargs)
def _append_line_number(self, row):
if self.row_count == 0:
row['line_number'] = 'line_number'
else:
row['line_number'] = self.row_count
self.row_count += 1
def writerow(self, row):
# Convert embedded Mac line endings to unix style line endings so they get quoted
row = dict([(k, v.replace('\r', '\n')) if isinstance(v, six.string_types) else (k, v) for k, v in row.items()])
if self.line_numbers:
self._append_line_number(row)
csv.DictWriter.writerow(self, row)
def writerows(self, rows):
for row in rows:
self.writerow(row)
class Sniffer(object):
"""
A functional wrapper of ``csv.Sniffer()``.
"""
def sniff(self, sample):
"""
A functional version of ``csv.Sniffer().sniff``, that extends the
list of possible delimiters to include some seen in the wild.
"""
try:
dialect = csv.Sniffer().sniff(sample, POSSIBLE_DELIMITERS)
        except csv.Error:
dialect = None
return dialect
def reader(*args, **kwargs):
"""
A replacement for Python's :func:`csv.reader` that uses
:class:`.csv_py3.Reader`.
"""
return Reader(*args, **kwargs)
def writer(*args, **kwargs):
"""
A replacement for Python's :func:`csv.writer` that uses
:class:`.csv_py3.Writer`.
"""
return Writer(*args, **kwargs)
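# Usage sketch (added for illustration, not part of agate): the wrappers above
# are drop-in replacements for the stdlib csv functions; the file name is
# hypothetical.
#
#   with open('example.csv', 'w') as f:
#       writer(f, line_numbers=True).writerows([['a', 'b'], ['1', '2']])
#   with open('example.csv') as f:
#       for row in reader(f):
#           print(row)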
| mit |
atpy/atpy | atpy/fitstable.py | 1 | 11686 | from __future__ import print_function, division
import os
import numpy as np
from astropy.io import fits
from .exceptions import TableException
from .helpers import smart_dtype, smart_mask
from .decorators import auto_download_to_file, auto_fileobj_to_file
standard_keys = ['XTENSION', 'NAXIS', 'NAXIS1', 'NAXIS2', 'TFIELDS', \
'PCOUNT', 'GCOUNT', 'BITPIX', 'EXTNAME']
# Define type conversion dictionary
type_dict = {}
type_dict[np.bool_] = "L"
type_dict[np.int8] = "B"
type_dict[np.uint8] = "B"
type_dict[np.int16] = "I"
type_dict[np.uint16] = "I"
type_dict[np.int32] = "J"
type_dict[np.uint32] = "J"
type_dict[np.int64] = "K"
type_dict[np.uint64] = "K"
type_dict[np.float32] = "E"
type_dict[np.float64] = "D"
type_dict[np.str] = "A"
type_dict[np.string_] = "A"
type_dict[str] = "A"
def _list_tables(filename):
hdulist = fits.open(filename)
tables = {}
for i, hdu in enumerate(hdulist[1:]):
if hdu.header['XTENSION'] in ['BINTABLE', 'ASCIITABLE', 'TABLE']:
tables[i + 1] = hdu.name
hdulist.close()
return tables
# PyFITS can handle compression, so no decompression detection
@auto_download_to_file
@auto_fileobj_to_file
def read(self, filename, hdu=None, memmap=False, verbose=True):
'''
Read a table from a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to read the table from
Optional Keyword Arguments:
*hdu*: [ integer ]
The HDU to read from the FITS file (this is only required
if there are more than one table in the FITS file)
*memmap*: [ bool ]
Whether PyFITS should use memory mapping
'''
self.reset()
# If no hdu is requested, check that there is only one table
if not hdu:
tables = _list_tables(filename)
if len(tables) == 0:
raise Exception("No tables in file")
elif len(tables) == 1:
hdu = tables.keys()[0]
else:
raise TableException(tables, 'hdu')
hdulist = fits.open(filename, memmap=memmap)
hdu = hdulist[hdu]
table = hdu.data
header = hdu.header
columns = hdu.columns
# Construct dtype for table
dtype = []
for i in range(len(hdu.data.dtype)):
name = hdu.data.dtype.names[i]
type = hdu.data.dtype[name]
if type.subdtype:
type, shape = type.subdtype
else:
shape = ()
# Get actual FITS format and zero-point
format, bzero = hdu.columns[i].format, hdu.columns[i].bzero
# Remove numbers from format, to find just type
format = format.strip("1234567890.")
if type.type is np.string_ and format in ['I', 'F', 'E', 'D']:
if format == 'I':
type = np.int64
elif format in ['F', 'E']:
type = np.float32
elif format == 'D':
type = np.float64
if format == 'X' and type.type == np.uint8:
type = np.bool
if len(shape) == 1:
shape = (shape[0] * 8,)
if format == 'L':
type = np.bool
if bzero and format in ['B', 'I', 'J']:
if format == 'B' and bzero == -128:
dtype.append((name, np.int8, shape))
elif format == 'I' and bzero == - np.iinfo(np.int16).min:
dtype.append((name, np.uint16, shape))
elif format == 'J' and bzero == - np.iinfo(np.int32).min:
dtype.append((name, np.uint32, shape))
else:
dtype.append((name, type, shape))
else:
dtype.append((name, type, shape))
dtype = np.dtype(dtype)
if self._masked:
self._setup_table(len(hdu.data), dtype, units=columns.units)
else:
self._setup_table(len(hdu.data), dtype, units=columns.units, \
nulls=columns.nulls)
# Populate the table
for i, name in enumerate(columns.names):
format, bzero = hdu.columns[i].format[-1], hdu.columns[i].bzero
if bzero and format in ['B', 'I', 'J']:
data = np.rec.recarray.field(hdu.data, i)
if format == 'B' and bzero == -128:
data = (data.astype(np.int16) + bzero).astype(np.int8)
elif format == 'I' and bzero == - np.iinfo(np.int16).min:
data = (data.astype(np.int32) + bzero).astype(np.uint16)
elif format == 'J' and bzero == - np.iinfo(np.int32).min:
data = (data.astype(np.int64) + bzero).astype(np.uint32)
else:
data = table.field(name)
else:
data = table.field(name)
self.data[name][:] = data[:]
if self._masked:
if columns.nulls[i] == 'NAN.0':
null = np.nan
elif columns.nulls[i] == 'INF.0':
null = np.inf
else:
null = columns.nulls[i]
self.data[name].mask = smart_mask(data, null)
self.data[name].set_fill_value(null)
for key in header.keys():
if not key[:4] in ['TFOR', 'TDIS', 'TDIM', 'TTYP', 'TUNI'] and \
not key in standard_keys:
self.add_keyword(key, header[key])
try:
header['COMMENT']
except KeyError:
pass
else:
# PyFITS used to define header['COMMENT'] as the last comment read in
# (which was a string), but now defines it as a _HeaderCommentaryCards
# object
if isinstance(header['COMMENT'], basestring):
for comment in header.get_comment():
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
else:
for comment in header['COMMENT']:
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
if hdu.name:
self.table_name = str(hdu.name)
hdulist.close()
return
def _to_hdu(self):
'''
Return the current table as a astropy.io.fits HDU object
'''
columns = []
for name in self.names:
if self._masked:
data = self.data[name].filled()
null = self.data[name].fill_value
if data.ndim > 1:
null = null[0]
if type(null) in [np.bool_, np.bool]:
null = bool(null)
else:
data = self.data[name]
null = self.columns[name].null
unit = self.columns[name].unit
dtype = self.columns[name].dtype
elemwidth = None
        if unit is None:
unit = ''
if data.ndim > 1:
elemwidth = str(data.shape[1])
column_type = smart_dtype(dtype)
if column_type == np.string_:
elemwidth = dtype.itemsize
if column_type in type_dict:
if elemwidth:
format = str(elemwidth) + type_dict[column_type]
else:
format = type_dict[column_type]
else:
raise Exception("cannot use numpy type " + str(column_type))
if column_type == np.uint16:
bzero = - np.iinfo(np.int16).min
elif column_type == np.uint32:
bzero = - np.iinfo(np.int32).min
elif column_type == np.uint64:
raise Exception("uint64 unsupported")
elif column_type == np.int8:
bzero = -128
else:
bzero = None
columns.append(fits.Column(name=name, format=format, unit=unit, \
null=null, array=data, bzero=bzero))
hdu = fits.new_table(fits.ColDefs(columns))
try:
hdu.name = self.table_name
except:
hdu.name = ''
for key in self.keywords:
if len(key) > 8:
keyname = "hierarch " + key
else:
keyname = key
try: # PyFITS 3.x
hdu.header[keyname] = self.keywords[key]
except KeyError: # PyFITS 2.x
hdu.header.update(keyname, self.keywords[key])
for comment in self.comments:
hdu.header.add_comment(comment)
return hdu
def write(self, filename, overwrite=False):
'''
Write the table to a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to write the table to
Optional Keyword Arguments:
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
'''
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
try:
_to_hdu(self).writeto(filename)
except:
_to_hdu(self).writeto(filename, output_verify='silentfix')
# PyFITS can handle compression, so no decompression detection
@auto_download_to_file
@auto_fileobj_to_file
def read_set(self, filename, memmap=False, verbose=True):
'''
Read all tables from a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to read the tables from
Optional Keyword Arguments:
*memmap*: [ bool ]
Whether PyFITS should use memory mapping
'''
self.reset()
# Read in primary header
header = fits.getheader(filename, 0)
for key in header.keys():
if not key[:4] in ['TFOR', 'TDIS', 'TDIM', 'TTYP', 'TUNI'] and \
not key in standard_keys:
self.add_keyword(key, header[key])
try:
header['COMMENT']
except KeyError:
pass
else:
# PyFITS used to define header['COMMENT'] as the last comment read in
# (which was a string), but now defines it as a _HeaderCommentaryCards
# object
if isinstance(header['COMMENT'], basestring):
for comment in header.get_comment():
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
else:
for comment in header['COMMENT']:
if isinstance(comment, fits.Card):
self.add_comment(comment.value)
else:
self.add_comment(comment)
# Read in tables one by one
from .basetable import Table
for hdu in _list_tables(filename):
table = Table()
read(table, filename, hdu=hdu, memmap=memmap, verbose=verbose)
self.append(table)
def write_set(self, filename, overwrite=False):
'''
Write the tables to a FITS file
Required Arguments:
*filename*: [ string ]
The FITS file to write the tables to
Optional Keyword Arguments:
*overwrite*: [ True | False ]
Whether to overwrite any existing file without warning
'''
if os.path.exists(filename):
if overwrite:
os.remove(filename)
else:
raise Exception("File exists: %s" % filename)
primary = fits.PrimaryHDU()
for key in self.keywords:
if len(key) > 8:
keyname = "hierarch " + key
else:
keyname = key
try: # PyFITS 3.x
primary.header[keyname] = self.keywords[key]
except KeyError: # PyFITS 2.x
primary.header.update(keyname, self.keywords[key])
for comment in self.comments:
primary.header.add_comment(comment)
hdulist = [primary]
for table_key in self.tables:
hdulist.append(_to_hdu(self.tables[table_key]))
hdulist = fits.HDUList(hdulist)
hdulist.writeto(filename)
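# Usage sketch (added for illustration, not part of this module): read(),
# write(), read_set() and write_set() are normally dispatched through
# atpy.Table and atpy.TableSet rather than called directly; the file names
# below are hypothetical.
#
#   from atpy import Table
#   t = Table('catalog.fits', hdu=1)            # dispatches to read() above
#   t.write('catalog_copy.fits', overwrite=True)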
| mit |
Kraxi/YTplaylist | venv/Lib/site-packages/pip/commands/install.py | 187 | 14659 | from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
try:
import wheel
except ImportError:
wheel = None
from pip.req import RequirementSet
from pip.basecommand import RequirementCommand
from pip.locations import virtualenv_no_global, distutils_scheme
from pip.index import PackageFinder
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils import ensure_dir
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.filesystem import check_path_owner
from pip.wheel import WheelCache, WheelBuilder
logger = logging.getLogger(__name__)
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmd_opts.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='dir',
default=None,
help=("Download packages into <dir> instead of installing them, "
"regardless of what's already installed."),
)
cmd_opts.add_option(cmdoptions.download_cache())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. This process is recursive regardless of whether '
'a dependency is already satisfied.'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='When upgrading, reinstall all packages even if they are '
'already up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.install_options())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)")
cmd_opts.add_option(
'--egg',
dest='as_egg',
action='store_true',
help="Install packages as eggs, not 'flat', like pip normally "
"does. This option is not about installing *from* eggs. "
"(WARNING: Because this option overrides pip's normal install"
" logic, requirements files may not behave as expected.)")
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile py files to pyc",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile py files to pyc",
)
cmd_opts.add_option(cmdoptions.use_wheel())
cmd_opts.add_option(cmdoptions.no_use_wheel())
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(
find_links=options.find_links,
format_control=options.format_control,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
cmdoptions.resolve_wheel_no_use_binary(options)
cmdoptions.check_install_build_global(options)
if options.download_dir:
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
install_options.append('--prefix=')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
if options.cache_dir and not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"by the current user and caching wheels has been "
"disabled. check the permissions and owner of that "
"directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
wheel_cache=wheel_cache,
)
self.populate_requirement_set(
requirement_set, args, options, finder, session, self.name,
wheel_cache
)
if not requirement_set.has_requirements:
return
try:
if (options.download_dir or not wheel or not
options.cache_dir):
# on -d don't do complex things like building
# wheels, and don't try to build wheels when wheel is
# not installed.
requirement_set.prepare_files(finder)
else:
# build wheels before install.
wb = WheelBuilder(
requirement_set,
finder,
build_options=[],
global_options=[],
)
# Ignore the result: a failed wheel will be
# installed from the sdist/vcs whatever.
wb.build(autobuilding=True)
if not options.download_dir:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
if options.target_dir:
ensure_dir(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
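# Invocation sketch (added for illustration, not part of pip): the command is
# normally dispatched from pip's CLI entry point, but the base class also
# allows driving it programmatically; the package name is arbitrary.
#
#   cmd = InstallCommand()
#   cmd.main(['requests', '--no-deps'])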
| gpl-2.0 |
loulich/Couchpotato | libs/pyutil/test/deprecated/test_picklesaver.py | 106 | 1340 | #!/usr/bin/env python
# Copyright (c) 2002 Luke 'Artimage' Nelson
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import os
try:
from twisted.trial import unittest
except ImportError, le:
print "Skipping %s since it requires Twisted and Twisted could not be imported: %s" % (__name__, le,)
else:
from pyutil import PickleSaver, fileutil
class Thingie(PickleSaver.PickleSaver):
def __init__(self, fname, delay=30):
PickleSaver.PickleSaver.__init__(self, fname=fname, attrs={'tmp_store':'False'}, DELAY=delay)
class PickleSaverTest(unittest.TestCase):
def _test_save_now(self, fname):
thingie = Thingie(fname, delay=0)
thingie.tmp_store = 'True'
thingie.lazy_save() # Note: it was constructed with default save delay of 0.
def test_save_now(self):
"""
This test should create a lazy save object, save it with no delay and check if the file exists.
"""
tempdir = fileutil.NamedTemporaryDirectory()
fname = os.path.join(tempdir.name, "picklesavertest")
self._test_save_now(fname)
self.failUnless(os.path.isfile(fname), "The file [%s] does not exist." %(fname,))
tempdir.shutdown()
| gpl-3.0 |
skriticos/ac | src/AniChou/myanimelist.py | 1 | 14742 | # =========================================================================== #
# Name: myanimelist.py
# Purpose: Provide an interface to anime data; synchronize with the MyAnimeList
# server;
#
# Copyright (c) 2008 Gareth Latty
# Copyright (c) 2009 Sebastian Bartos
# Copyright (c) 2009 Daniel Anderson - dankles/evilsage4
#
# License: GPL v3, see COPYING file for details
# =========================================================================== #
import urllib
import urllib2
from cookielib import LWPCookieJar
import socket
from lib.beautifulsoup import BeautifulSoup
import re
import urlparse
from datetime import date, datetime
import os, time
from data import mal_anime_data_schema
from database import db as local_database
from globs import ac_log_path, ac_data_path
class anime_data(object):
"""
Anime data module. Reads and writes local anime data to disk, fetches and
syncs with MyanimeList server.
username: login username
password: login password
db_path: path to database
db: local anime database that is a nested dict and has ASCII-fied series
    titles as keys and fields from mal_anime_data_schema as dict data.
"""
def __init__(self, **kw):
"""
Setup credentials, read local data and setup network connection
environment. Optionally sync with MAL on startup.
Does not take positional arguments. Keyword arguments can either be
given individually (username, password, initsync) or as an
ac_config() instance. This will not be retained.
In the latter form we support some additional command line options.
"""
# When the architecture stabilizes, switch to config as the sole
# positional argument, and retain it instead of copying parts.
# That would also enable reconfiguration at runtime.
self.username = kw.get('username', kw['config'].get('mal', 'username'))
self.password = kw.get('password', kw['config'].get('mal', 'password'))
initsync = kw.get('initsync', kw['config'].get('startup', 'sync'))
try:
self.login = kw['config'].get('mal', 'login')
except KeyError:
# We need a default even if arguments were given individually.
self.login = True
try:
self.mirror = kw['config'].get('mal', 'mirror')
except KeyError:
self.mirror = None
# pull the local DB as a dictionary object
#self.db = {}
self.local_db = local_database()
self.db = self.local_db.get_db()
# setup cookie handler
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(LWPCookieJar()))
urllib2.install_opener(opener)
socket.setdefaulttimeout(40)
if initsync:
self.sync()
def save(self):
""" Only saves the current state to disk w/o network activity.
"""
self.local_db.set_db(self.db)
def sync(self):
"""
        Synchronize local anime database with the MyAnimeList server.
(fetch -> compare -> push -> update local)
Return:
nested dict of remote updates with ASCII-fied series titles as
keys and a list of keys that got deleted on the MyAnimeList server.
"""
# Three way switch: login (un)successfull or don't even try.
login = _login(self.username,self.password) if self.login else None
if login is False:
            print 'Login failed.'
return False
remoteAnime_db = _getAnimeList(self.username, self.mirror)
if self.db:
# If local DB is already initialized then filter changes
# and push local updates
(remote_updates, local_updates, deleted_entry_keys) = \
_filter_sync_changes(remoteAnime_db, self.db)
_logchanges(remote_updates, local_updates, deleted_entry_keys)
if login:
_push_list(local_updates)
else:
                print 'Warning! Your local data goes out of sync'
# update local anime list with changes
for key in deleted_entry_keys:
del self.db[key]
for key, value in remote_updates.items():
self.db[key] = value
# write to local DB
self.local_db.set_db(self.db)
return (remote_updates, deleted_entry_keys)
else:
# initialize local data, as it was empty before
self.db = remoteAnime_db
# write to local DB
self.local_db.set_db(self.db)
return (self.db, {})
def fetch(self):
"""
UNUSED
Only fetch anime data from MyAnimeList server (overwrites local data,
if existent). Useful for initializing and resetting local database.
Returns a copy of the fetched database on success, None on failure.
"""
        self.db = _getAnimeList(self.username, self.mirror)
# write to local DB
self.local_db.set_db(self.db)
return self.db
def _appInfoURL(user, status = 'all', typ = None):
"""
Safely generate a URL to get XML.
Type may be 'manga'.
"""
# Example taken from the site.
template = 'http://myanimelist.net/malappinfo.php?u=Wile&status=all&type=manga'
# Make tuple mutable.
parts = list(urlparse.urlparse(template))
# New parameters.
query = {'u': user}
if status:
query['status'] = status
if typ:
query['type'] = typ
# urlencode would literally output 'None'.
parts[4] = urllib.urlencode(query)
return urlparse.urlunparse(parts)
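# Illustration (added, not part of the original module): for example,
# _appInfoURL('Wile', typ='manga') yields a URL equivalent to (parameter
# order may vary):
# http://myanimelist.net/malappinfo.php?u=Wile&status=all&type=manga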
def _getAnimeList(username, mirror):
"""
Retrieve Anime XML from MyAnimeList server.
Returns: dictionary object.
    Ways in which the output of malAppInfo is *not* XML:
Declared as UTF-8 but contains illegal byte sequences (characters)
Uses entities inside CDATA, which is exactly the wrong way round.
It further disagrees with the Expat C extension behind minidom:
Contains tabs and newlines outside of tags.
"""
# This function should be broken up and partly refactored into
# the class to be better configurable.
fetch_url = _appInfoURL(username)
try:
fetch_response = open(mirror, 'rb')
except:
# TODO whatever error open(None) raises.
fetch_response = urllib2.urlopen(fetch_url)
# BeautifulSoup could do the read() and unicode-conversion, if it
# weren't for the illegal characters, as it internally doesn't
# use 'replace'.
fetch_response = unicode(fetch_response.read(), 'utf-8', 'replace')
xmldata = BeautifulSoup.BeautifulStoneSoup(fetch_response)
# For unknown reasons it doesn't work without recursive.
# Nor does iterating over myanimelist.anime. BS documentation broken?
anime_nodes = xmldata.myanimelist.findAll('anime', recursive = True)
# We have to manually convert after getting them out of the CDATA.
entity = lambda m: BeautifulSoup.Tag.XML_ENTITIES_TO_SPECIAL_CHARS[m.group(1)]
# Walk through all the anime nodes and convert the data to a python
# dictionary.
ac_remote_anime_dict = dict()
for anime in anime_nodes:
# ac_node builds the output of our function. Everything added to it
# must either be made independent of the parse tree by calling
# NavigableString.extract() or, preferrably, be turned into a
# different type like unicode(). This is a side-effect of using
# non-mutators like string.strip()
# Failing to do this will crash cPickle.
ac_node = dict()
for node, typ in mal_anime_data_schema.iteritems():
try:
value = getattr(anime, node).string.strip()
# One would think re.sub directly accepts string subclasses
# like NavigableString. Raises a TypeError, though.
value = re.sub(r'&(\w+);', entity, value)
except AttributeError:
continue
if typ is datetime:
# process my_last_updated unix timestamp
ac_node[node] = datetime.fromtimestamp(int(value))
elif typ is int:
# process integer slots
ac_node[node] = int(value)
elif typ is date and value != '0000-00-00':
                # process date slots
(y,m,d) = value.split('-')
(y,m,d) = int(y), int(m), int(d)
if y and m and d:
ac_node[node] = date(y,m,d)
else:
# process string slots
ac_node[node] = value
# series titles are used as anime identifiers
    # the keys for the resulting dictionary are encoded to UTF-8 byte
    # strings, so they can be simply put into shelves
key = ac_node['series_title'].encode('utf-8')
# add node entry to the resulting nodelist
ac_remote_anime_dict[key] = ac_node
# the resulting dict is like this:
# {<ASCII-fied key from title>: {<mal_anime_data_schema-fields>: <values>}, ...}
return ac_remote_anime_dict
def _logchanges(remote, local, deleted):
""" Writes changes to logfile.
"""
f = open(ac_log_path, 'a')
now = str(int(time.mktime(datetime.now().timetuple())))
for key, value in remote.items():
f.write(now + ': Fetching "' + key +
'" episode ' + str(value['my_watched_episodes']) + '\n')
for key, value in local.items():
f.write(now + ': Pushing "' + key +
'" episode ' + str(value['my_watched_episodes']) + '\n')
for entry in deleted:
f.write(now + ': Deleted "' + entry + '"\n')
f.close()
def _login(username, password):
"""
Log in to MyAnimeList server.
Returns: True on success, False on failure
"""
# prepare login data
login_base_url = 'http://myanimelist.net/login.php'
headers = {
'User-Agent': 'anichou',
'Content-Type': 'application/x-www-form-urlencoded'}
login_data = urllib.urlencode({
'username': username,
'password': password,
'cookie': 1,
'sublogin': 'Login'})
# phrase login request (to perform a POST request)
login_request = urllib2.Request(login_base_url, login_data, headers)
# try to connect and authenticate with MyAnimeList server
try:
login_response = urllib2.urlopen(login_request).read()
except urllib2.URLError, e:
if hasattr(e, 'reason'):
print 'Failed to reach myanimelist.net.'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
print 'The server couldn\'t fulfill the request.'
print 'Error code: ', e.code
return False
# check if login was successful
if not login_response.count('<div class="badresult">'):
if login_response == "Couldn't open s-database. Please contact Xinil.":
return False
return True
else:
return False
def _filter_sync_changes(ac_remote_anime_dict, ac_local_anime_dict):
"""
Private Method
Compares the anime entry my_last_updated in both parameters and returns two
dictionaries of changed values of both parameters.
The one for the local dictionary can be used to push changes to the mal
server while the other can be used to update the local display and database.
Returns:
remote_updates: changes that are more up to date on the server
local_updates: changes that are more up to date locally
        deleted_entry_keys: keys that are in the local database, but not in the
remote list.
"""
remote_updates = dict()
local_updates = dict()
# search for entirely new enries and deleted entries
remote_keys = ac_remote_anime_dict.keys()
local_keys = ac_local_anime_dict.keys()
deleted_entry_keys = \
filter(lambda x:x not in remote_keys, local_keys)
new_entry_keys = \
filter(lambda x:x not in local_keys, remote_keys)
for key in new_entry_keys:
remote_updates[key] = ac_remote_anime_dict[key]
# search in both dictionaries for differing update keys and append to the
# other's updates depending on which key is newer
common_keys = filter(lambda x:x in local_keys, remote_keys)
for key in common_keys:
remote_timestamp = ac_remote_anime_dict[key]['my_last_updated']
local_timestamp = ac_local_anime_dict[key]['my_last_updated']
if remote_timestamp > local_timestamp:
remote_updates[key] = ac_remote_anime_dict[key]
elif remote_timestamp < local_timestamp:
local_updates[key] = ac_local_anime_dict[key]
return (remote_updates, local_updates, deleted_entry_keys)
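# Illustration (added, not part of the original module): with
# remote = {'A': ..., 'B': ...} and local = {'B': ..., 'C': ...}, 'A' ends up
# in remote_updates as a new entry, 'C' in deleted_entry_keys, and 'B' goes to
# whichever side carries the newer 'my_last_updated' timestamp.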
def _push_list(local_updates):
"""
    Private Method
Updates every entry in the local updates dictionary to the mal server.
Should be called after the local updates are determined with the
filter_sync_changes function.
Returns:
True on success, False on failure
"""
headers = {
'User-Agent': 'anichou',
'Content-Type': 'application/x-www-form-urlencoded'}
for anime in local_updates.values():
# construct push request for entry update
postdata = urllib.urlencode({
# id entry
'series_animedb_id': str(anime['series_animedb_id']),
'series_title': str(anime['series_animedb_id']),
# set interesting values
'completed_eps': str(anime['my_watched_episodes']),
'status': str(anime['my_status']),
'score': str(anime['my_score']),
# protocol stuff
'close_on_update': 'true',
'submitIt': 2 })
push_base_url = \
'http://myanimelist.net/panel.php?keepThis=true&go=edit&id=' + \
str(anime['my_id']) + '&hidenav=true&TB_iframe=false'
push_request = urllib2.Request(push_base_url, postdata, headers)
# push update request
try:
response = urllib2.urlopen(push_request)
# print response.read() # -- for testing
        except urllib2.URLError, e:
if hasattr(e, 'reason'):
print 'We failed to reach a server.'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
print 'The server couldn\'t fulfill the request.'
print 'Error code: ', e.code
return False
return True
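# Usage sketch (added for illustration, not part of this module): typical
# driver code, assuming valid MyAnimeList credentials; the values below are
# placeholders.
#
#   data = anime_data(username='user', password='secret', initsync=False)
#   remote_updates, deleted_keys = data.sync()
#   data.save()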
| gpl-3.0 |
rschnapka/bank-payment | account_direct_debit/model/account_invoice.py | 1 | 5845 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 - 2013 Therp BV (<http://therp.nl>).
#
# All other contributions are (C) by their respective contributors
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
This module adds support for Direct debit orders as applicable
in the Netherlands. Debit orders are advanced in total by the bank.
Amounts that cannot be debited or are canceled by account owners are
credited afterwards. Such a credit entry is called a storno.
Invoice workflow:
1 the sale leads to
1300 Debtors 100
8000 Sales 100
Balance:
Debtors 2000 |
Sales | 2000
2 an external booking takes place
1100 Bank 100
1300 Debtors 100
This booking is reconciled with [1]
The invoice gets set to state 'paid', and 'reconciled' = True
Balance:
Debtors 1900 |
Bank 100 |
Sales | 2000
This module implements the following diversion:
2a the invoice is included in a direct debit order. When the order is
confirmed, a move is created per invoice:
2000 Transfer account 100 |
1300 Debtors | 100
Reconciliation takes place between 1 and 2a.
The invoice gets set to state 'paid', and 'reconciled' = True
Balance:
Debtors 0 |
Transfer account 2000 |
Bank 0 |
Sales | 2000
3a the direct debit order is booked on the bank account
Balance:
1100 Bank 2000 |
2000 Transfer account | 2000
Reconciliation takes place between 3a and 2a
Balance:
Debtors 0 |
Transfer account 0 |
Bank 2000 |
Sales | 2000
4 a storno from invoice [1] triggers a new booking on the bank account
1300 Debtors 100 |
1100 Bank | 100
Balance:
Debtors 100 |
Transfer account 0 |
Bank 1900 |
Sales | 2000
The reconciliation of 2a is undone. The booking of 2a is reconciled
with the booking of 4 instead.
The payment line attribute 'storno' is set to True and the invoice
state is no longer 'paid'.
Two cases need to be distinguished:
1) If the storno is a manual storno from the partner, the invoice is set to
state 'debit_denied', with 'reconciled' = False
This module implements this option by allowing the bank module to call
netsvc.LocalService("workflow").trg_validate(
uid, 'account.invoice', ids, 'debit_denied', cr)
2) If the storno is an error generated by the bank (presumably non-fatal),
the invoice is reopened for the next debit run.
This is a call to existing
netsvc.LocalService("workflow").trg_validate(
uid, 'account.invoice', ids, 'open_test', cr)
Should also be adding a log entry on the invoice for tracing purposes
self._log_event(cr, uid, ids, -1.0, 'Debit denied')
If not for that funny comment
"#TODO: implement messages system" in account/invoice.py
Recurring non-fatal errors need to be dealt with manually by checking
open invoices with a matured invoice- or due date.
"""
from openerp.osv import orm
from openerp.tools.translate import _
class account_invoice(orm.Model):
_inherit = "account.invoice"
def __init__(self, pool, cr):
"""
Adding a state to the hardcoded state list of the inherited
model. The alternative is duplicating the field definition
in columns but only one module can do that!
Maybe apply a similar trick when overriding the buttons' 'states'
attributes in the form view, manipulating the xml in fields_view_get().
"""
super(account_invoice, self).__init__(pool, cr)
invoice_obj = pool.get('account.invoice')
invoice_obj._columns['state'].selection.append(
('debit_denied', 'Debit denied'))
def action_debit_denied(self, cr, uid, ids, context=None):
for invoice_id in ids:
if self.test_paid(cr, uid, [invoice_id], context):
number = self.read(
cr, uid, invoice_id, ['number'], context=context)['number']
raise orm.except_orm(
_('Error !'),
_("You cannot set invoice '%s' to state 'debit denied', "
'as it is still reconciled.') % number)
self.write(cr, uid, ids, {'state': 'debit_denied'}, context=context)
for inv_id, name in self.name_get(cr, uid, ids, context=context):
message = _("Invoice '%s': direct debit is denied.") % name
self.log(cr, uid, inv_id, message)
return True
def test_undo_debit_denied(self, cr, uid, ids, context=None):
"""
Called from the workflow. Used to unset paid state on
invoices that were paid with bank transfers which are being cancelled
"""
for invoice in self.read(cr, uid, ids, ['reconciled'], context):
if not invoice['reconciled']:
return False
return True
| agpl-3.0 |
gautam1858/tensorflow | tensorflow/contrib/framework/python/ops/ops_test.py | 118 | 2882 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import ops as ops_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class OpsTest(test.TestCase):
def testGetGraphFromEmptyInputs(self):
with ops.Graph().as_default() as g0:
self.assertIs(g0, ops_lib.get_graph_from_inputs([]))
def testGetGraphFromValidInputs(self):
g0 = ops.Graph()
with g0.as_default():
values = [constant_op.constant(0.0), constant_op.constant(1.0)]
self.assertIs(g0, ops_lib.get_graph_from_inputs(values))
self.assertIs(g0, ops_lib.get_graph_from_inputs(values, g0))
with ops.Graph().as_default():
self.assertIs(g0, ops_lib.get_graph_from_inputs(values))
self.assertIs(g0, ops_lib.get_graph_from_inputs(values, g0))
def testGetGraphFromInvalidInputs(self):
g0 = ops.Graph()
with g0.as_default():
values = [constant_op.constant(0.0), constant_op.constant(1.0)]
g1 = ops.Graph()
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g1)
with g1.as_default():
values.append(constant_op.constant(2.0))
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
ops_lib.get_graph_from_inputs(values)
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g0)
with self.assertRaisesRegexp(ValueError, "not from the passed-in graph"):
ops_lib.get_graph_from_inputs(values, g1)
def testGetNameScope(self):
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", ops_lib.get_name_scope())
self.assertEqual("scope1/scope2", ops_lib.get_name_scope())
self.assertEqual("scope1", ops_lib.get_name_scope())
self.assertEqual("", ops_lib.get_name_scope())
if __name__ == "__main__":
test.main()
| apache-2.0 |
edcast-inc/edx-platform-edcast | common/djangoapps/monitoring/signals.py | 172 | 4584 | """
Add receivers for django signals, and feed data into the monitoring system.
If a model has a class attribute 'MODEL_TAGS' that is a list of strings,
those fields will be retrieved from the model instance, and added as tags to
the recorded metrics.
"""
from django.db.models.signals import post_save, post_delete, m2m_changed, post_init
from django.dispatch import receiver
import dogstats_wrapper as dog_stats_api
def _database_tags(action, sender, kwargs):
"""
    Return tags for the sender and database used in django.db.models signals.
Arguments:
action (str): What action is being performed on the db model.
sender (Model): What model class is the action being performed on.
kwargs (dict): The kwargs passed by the model signal.
"""
tags = _model_tags(kwargs, 'instance')
tags.append(u'action:{}'.format(action))
if 'using' in kwargs:
tags.append(u'database:{}'.format(kwargs['using']))
return tags
def _model_tags(kwargs, key):
"""
Return a list of all tags for all attributes in kwargs[key].MODEL_TAGS,
plus a tag for the model class.
"""
if key not in kwargs:
return []
instance = kwargs[key]
tags = [
u'{}.{}:{}'.format(key, attr, getattr(instance, attr))
for attr in getattr(instance, 'MODEL_TAGS', [])
]
tags.append(u'model_class:{}'.format(instance.__class__.__name__))
return tags
@receiver(post_init, dispatch_uid='edxapp.monitoring.post_init_metrics')
def post_init_metrics(sender, **kwargs):
"""
Record the number of times that django models are instantiated.
Args:
sender (Model): The model class sending the signals.
using (str): The name of the database being used for this initialization (optional).
instance (Model instance): The instance being initialized (optional).
"""
tags = _database_tags('initialized', sender, kwargs)
dog_stats_api.increment('edxapp.db.model', tags=tags)
@receiver(post_save, dispatch_uid='edxapp.monitoring.post_save_metrics')
def post_save_metrics(sender, **kwargs):
"""
Record the number of times that django models are saved (created or updated).
Args:
sender (Model): The model class sending the signals.
using (str): The name of the database being used for this update (optional).
instance (Model instance): The instance being updated (optional).
"""
action = 'created' if kwargs.pop('created', False) else 'updated'
tags = _database_tags(action, sender, kwargs)
dog_stats_api.increment('edxapp.db.model', tags=tags)
@receiver(post_delete, dispatch_uid='edxapp.monitoring.post_delete_metrics')
def post_delete_metrics(sender, **kwargs):
"""
Record the number of times that django models are deleted.
Args:
sender (Model): The model class sending the signals.
using (str): The name of the database being used for this deletion (optional).
instance (Model instance): The instance being deleted (optional).
"""
tags = _database_tags('deleted', sender, kwargs)
dog_stats_api.increment('edxapp.db.model', tags=tags)
@receiver(m2m_changed, dispatch_uid='edxapp.monitoring.m2m_changed_metrics')
def m2m_changed_metrics(sender, **kwargs):
"""
Record the number of times that Many2Many fields are updated. This is separated
from post_save and post_delete, because it's signaled by the database model in
the middle of the Many2Many relationship, rather than either of the models
that are the relationship participants.
Args:
sender (Model): The model class in the middle of the Many2Many relationship.
action (str): The action being taken on this Many2Many relationship.
using (str): The name of the database being used for this deletion (optional).
instance (Model instance): The instance whose many-to-many relation is being modified.
model (Model class): The model of the class being added/removed/cleared from the relation.
"""
if 'action' not in kwargs:
return
action = {
'post_add': 'm2m.added',
'post_remove': 'm2m.removed',
'post_clear': 'm2m.cleared',
}.get(kwargs['action'])
if not action:
return
tags = _database_tags(action, sender, kwargs)
if 'model' in kwargs:
tags.append('target_class:{}'.format(kwargs['model'].__name__))
pk_set = kwargs.get('pk_set', []) or []
dog_stats_api.increment(
'edxapp.db.model',
value=len(pk_set),
tags=tags
)
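# Usage sketch (added for illustration, not part of this module): a model opts
# into per-instance tagging by declaring MODEL_TAGS; the model and field names
# below are hypothetical.
#
#   class CourseEnrollment(models.Model):
#       MODEL_TAGS = ['course_id', 'mode']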
| agpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/GL/INTEL/map_texture.py | 9 | 1716 | '''OpenGL extension INTEL.map_texture
This module customises the behaviour of the
OpenGL.raw.GL.INTEL.map_texture to provide a more
Python-friendly API
Overview (from the spec)
Systems with integrated GPUs can share the same physical memory between CPU
and GPU. This feature, if exposed by API, can bring significant performance
benefits for graphics applications by reducing the complexity of
uploading/accessing texture contents. This extension enables CPU direct
access to the GPU memory holding textures.
The problem with texture memory directly exposed to clients is that
textures are often 'tiled'. Texels are kept in specific layout to improve
locality of reference and thus performance of texturing. This 'tiling'
is specific to particular hardware and would be thus difficult to use.
This extension allows to create textures with 'linear' layout which allows
for simplified access on user side (potentially sacrificing some
performance during texture sampling).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/INTEL/map_texture.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.INTEL.map_texture import *
from OpenGL.raw.GL.INTEL.map_texture import _EXTENSION_NAME
def glInitMapTextureINTEL():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glMapTexture2DINTEL=wrapper.wrapper(glMapTexture2DINTEL).setInputArraySize(
'stride', 1
).setInputArraySize(
'layout', 1
)
### END AUTOGENERATED SECTION | gpl-3.0 |
analyseuc3m/ANALYSE-v1 | lms/djangoapps/mobile_api/course_info/views.py | 3 | 3753 | """
Views for course info API
"""
from django.http import Http404
from rest_framework import generics
from rest_framework.response import Response
from courseware.courses import get_course_info_section_module
from static_replace import make_static_urls_absolute
from openedx.core.lib.xblock_utils import get_course_update_items
from ..utils import mobile_view, mobile_course_access
@mobile_view()
class CourseUpdatesList(generics.ListAPIView):
"""
**Use Case**
Get the content for course updates.
**Example Request**
GET /api/mobile/v0.5/course_info/{course_id}/updates
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with an array of course updates. Each course update
contains the following values.
* content: The content, as an HTML string, of the course update.
* date: The date of the course update.
* id: The unique identifier of the update.
* status: Whether the update is visible or not.
"""
@mobile_course_access()
def list(self, request, course, *args, **kwargs):
course_updates_module = get_course_info_section_module(request, request.user, course, 'updates')
update_items = get_course_update_items(course_updates_module)
updates_to_show = [
update for update in update_items
if update.get("status") != "deleted"
]
for item in updates_to_show:
item['content'] = apply_wrappers_to_content(item['content'], course_updates_module, request)
return Response(updates_to_show)
@mobile_view()
class CourseHandoutsList(generics.ListAPIView):
"""
**Use Case**
Get the HTML for course handouts.
**Example Request**
GET /api/mobile/v0.5/course_info/{course_id}/handouts
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK"
response along with the following value.
* handouts_html: The HTML for course handouts.
"""
@mobile_course_access()
def list(self, request, course, *args, **kwargs):
course_handouts_module = get_course_info_section_module(request, request.user, course, 'handouts')
if course_handouts_module:
if course_handouts_module.data == "<ol></ol>":
handouts_html = None
else:
handouts_html = apply_wrappers_to_content(course_handouts_module.data, course_handouts_module, request)
return Response({'handouts_html': handouts_html})
else:
# course_handouts_module could be None if there are no handouts
return Response({'handouts_html': None})
def apply_wrappers_to_content(content, module, request):
"""
Updates a piece of html content with the filter functions stored in its module system, then replaces any
static urls with absolute urls.
Args:
content: The html content to which to apply the content wrappers generated for this module system.
module: The module containing a reference to the module system which contains functions to apply to the
content. These functions include:
* Replacing static url's
* Replacing course url's
* Replacing jump to id url's
request: The request, used to replace static URLs with absolute URLs.
Returns: A piece of html content containing the original content updated by each wrapper.
"""
content = module.system.replace_urls(content)
content = module.system.replace_course_urls(content)
content = module.system.replace_jump_to_id_urls(content)
return make_static_urls_absolute(request, content)
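# Usage sketch (added for illustration, not part of this module): both list
# views above feed raw module HTML through this helper, e.g.
#
#   handouts_html = apply_wrappers_to_content(module.data, module, request)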
| agpl-3.0 |
xianggong/m2c_unit_test | test/math/native_sin_float8/compile.py | 1861 | 4430 | #!/usr/bin/python
import os
import subprocess
import re
def runCommand(command):
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.wait()
return iter(p.stdout.readline, b'')
def dumpRunCommand(command, dump_file_name, postfix):
dumpFile = open(dump_file_name + postfix, "w+")
dumpFile.write(command + "\n")
for line in runCommand(command.split()):
dumpFile.write(line)
def rmFile(file_name):
cmd = "rm -rf " + file_name
runCommand(cmd.split())
def rnm_ir(file_name):
    # Prefix all unnamed variables with 'tmp_'
ir_file_name = file_name + ".ll"
if os.path.isfile(ir_file_name):
fo = open(ir_file_name, "rw+")
lines = fo.readlines()
fo.seek(0)
fo.truncate()
for line in lines:
# Add entry block identifier
if "define" in line:
line += "entry:\n"
# Rename all unnamed variables
            line = re.sub(r'\%([0-9]+)',
r'%tmp_\1',
line.rstrip())
# Also rename branch name
            line = re.sub(r'(\;\ \<label\>\:)([0-9]+)',
r'tmp_\2:',
line.rstrip())
fo.write(line + '\n')
def gen_ir(file_name):
# Directories
root_dir = '../../../'
header_dir = root_dir + "inc/"
# Headers
header = " -I " + header_dir
header += " -include " + header_dir + "m2c_buildin_fix.h "
header += " -include " + header_dir + "clc/clc.h "
header += " -D cl_clang_storage_class_specifiers "
gen_ir = "clang -S -emit-llvm -O0 -target r600-- -mcpu=verde "
cmd_gen_ir = gen_ir + header + file_name + ".cl"
dumpRunCommand(cmd_gen_ir, file_name, ".clang.log")
def asm_ir(file_name):
if os.path.isfile(file_name + ".ll"):
# Command to assemble IR to bitcode
gen_bc = "llvm-as "
gen_bc_src = file_name + ".ll"
gen_bc_dst = file_name + ".bc"
cmd_gen_bc = gen_bc + gen_bc_src + " -o " + gen_bc_dst
runCommand(cmd_gen_bc.split())
def opt_bc(file_name):
if os.path.isfile(file_name + ".bc"):
# Command to optmize bitcode
opt_bc = "opt --mem2reg "
opt_ir_src = file_name + ".bc"
opt_ir_dst = file_name + ".opt.bc"
cmd_opt_bc = opt_bc + opt_ir_src + " -o " + opt_ir_dst
runCommand(cmd_opt_bc.split())
def dis_bc(file_name):
if os.path.isfile(file_name + ".bc"):
# Command to disassemble bitcode
dis_bc = "llvm-dis "
dis_ir_src = file_name + ".opt.bc"
dis_ir_dst = file_name + ".opt.ll"
cmd_dis_bc = dis_bc + dis_ir_src + " -o " + dis_ir_dst
runCommand(cmd_dis_bc.split())
def m2c_gen(file_name):
if os.path.isfile(file_name + ".opt.bc"):
# Command to disassemble bitcode
m2c_gen = "m2c --llvm2si "
m2c_gen_src = file_name + ".opt.bc"
cmd_m2c_gen = m2c_gen + m2c_gen_src
dumpRunCommand(cmd_m2c_gen, file_name, ".m2c.llvm2si.log")
# Remove file if size is 0
if os.path.isfile(file_name + ".opt.s"):
if os.path.getsize(file_name + ".opt.s") == 0:
rmFile(file_name + ".opt.s")
def m2c_bin(file_name):
if os.path.isfile(file_name + ".opt.s"):
# Command to disassemble bitcode
m2c_bin = "m2c --si2bin "
m2c_bin_src = file_name + ".opt.s"
cmd_m2c_bin = m2c_bin + m2c_bin_src
dumpRunCommand(cmd_m2c_bin, file_name, ".m2c.si2bin.log")
def main():
# Commands
for file in os.listdir("./"):
if file.endswith(".cl"):
file_name = os.path.splitext(file)[0]
# Execute commands
gen_ir(file_name)
rnm_ir(file_name)
asm_ir(file_name)
opt_bc(file_name)
dis_bc(file_name)
m2c_gen(file_name)
m2c_bin(file_name)
if __name__ == "__main__":
main()
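# Usage sketch (assumption: clang, llvm-as, opt, llvm-dis and m2c are on PATH,
# and the working directory holds the .cl kernels to build):
#
#   $ python compile.py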
| gpl-2.0 |
pankajnits/vyked | vyked/registry_client.py | 2 | 6600 | import asyncio
import logging
import random
from collections import defaultdict
from again.utils import unique_hex
from functools import partial
from retrial.retrial import retry
from .packet import ControlPacket
from .protocol_factory import get_vyked_protocol
from .pinger import TCPPinger
def _retry_for_result(result):
if isinstance(result, tuple):
return not isinstance(result[0], asyncio.transports.Transport) or not isinstance(result[1], asyncio.Protocol)
return True
def _retry_for_exception(_):
return True
class RegistryClient:
logger = logging.getLogger(__name__)
def __init__(self, loop, host, port):
self._loop = loop
self._port = port
self._host = host
self.bus = None
self._service_host = None
self._service_port = None
self._transport = None
self._protocol = None
self._service = None
self._version = None
self._node_id = None
self._pinger = None
self._pending_requests = {}
self._available_services = defaultdict(list)
self._assigned_services = defaultdict(lambda: defaultdict(list))
def register(self, ip, port, service, version, vendors, service_type):
self._service_host = ip
self._service_port = port
self._service = service
self._version = version
self._node_id = '{}_{}_{}'.format(service, version, unique_hex())
packet = ControlPacket.registration(ip, port, self._node_id, service, version, vendors, service_type)
self._protocol.send(packet)
def get_instances(self, service, version):
packet = ControlPacket.get_instances(service, version)
future = asyncio.Future()
self._protocol.send(packet)
self._pending_requests[packet['request_id']] = future
return future
def get_subscribers(self, service, version, endpoint):
packet = ControlPacket.get_subscribers(service, version, endpoint)
# TODO : remove duplication in get_instances and get_subscribers
future = asyncio.Future()
self._protocol.send(packet)
self._pending_requests[packet['request_id']] = future
return future
def x_subscribe(self, endpoints):
packet = ControlPacket.xsubscribe(self._service, self._version, self._service_host, self._service_port,
self._node_id,
endpoints)
self._protocol.send(packet)
@retry(should_retry_for_result=_retry_for_result, should_retry_for_exception=_retry_for_exception,
strategy=[0, 2, 4, 8, 16, 32])
def connect(self):
self._transport, self._protocol = yield from self._loop.create_connection(partial(get_vyked_protocol, self),
self._host, self._port)
self._pinger = TCPPinger('registry', self._protocol, self)
self._pinger.ping()
return self._transport, self._protocol
def on_timeout(self, node_id):
asyncio.async(self.connect())
def receive(self, packet: dict, protocol, transport):
if packet['type'] == 'registered':
self.cache_vendors(packet['params']['vendors'])
self.bus.registration_complete()
elif packet['type'] == 'deregister':
self._handle_deregistration(packet)
elif packet['type'] == 'subscribers':
self._handle_subscriber_packet(packet)
elif packet['type'] == 'pong':
self._pinger.pong_received()
def get_all_addresses(self, full_service_name):
return self._available_services.get(
self._get_full_service_name(full_service_name[0], full_service_name[1]))
def get_for_node(self, node_id):
for services in self._available_services.values():
for host, port, node, service_type in services:
if node == node_id:
return host, port, node, service_type
return None
def get_random_service(self, service_name, service_type):
services = self._available_services[service_name]
services = [service for service in services if service[3] == service_type]
if len(services):
return random.choice(services)
else:
return None
def resolve(self, service: str, version: str, entity: str, service_type: str):
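        # Sticky resolution: once an entity has been assigned a node, the
        # mapping is cached in _assigned_services so later lookups for the
        # same (service, version, entity) return the same instance.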
service_name = self._get_full_service_name(service, version)
if entity is not None:
entity_map = self._assigned_services.get(service_name)
if entity_map is None:
self._assigned_services[service_name] = {}
entity_map = self._assigned_services.get(service_name)
if entity in entity_map:
return entity_map[entity]
else:
host, port, node_id, service_type = self.get_random_service(service_name, service_type)
if node_id is not None:
entity_map[entity] = host, port, node_id, service_type
return host, port, node_id, service_type
else:
return self.get_random_service(service_name, service_type)
@staticmethod
def _get_full_service_name(service, version):
return "{}/{}".format(service, version)
def cache_vendors(self, vendors):
for vendor in vendors:
vendor_name = self._get_full_service_name(vendor['name'], vendor['version'])
for address in vendor['addresses']:
self._available_services[vendor_name].append(
(address['host'], address['port'], address['node_id'], address['type']))
def _handle_deregistration(self, packet):
params = packet['params']
vendor = self._get_full_service_name(params['service'], params['version'])
node = params['node_id']
for each in self._available_services[vendor]:
if each[2] == node:
self._available_services[vendor].remove(each)
entity_map = self._assigned_services.get(vendor)
if entity_map is not None:
stale_entities = []
for entity, node_id in entity_map.items():
if node == node_id:
stale_entities.append(entity)
for entity in stale_entities:
entity_map.pop(entity)
def _handle_subscriber_packet(self, packet):
request_id = packet['request_id']
future = self._pending_requests[request_id]
future.set_result(packet['params']['subscribers'])
| mit |
liang42hao/bokeh | bokeh/transforms/image_downsample.py | 43 | 2158 | from __future__ import absolute_import
import numpy as np
from ..models import ServerDataSource
try:
import scipy
import scipy.misc
except ImportError as e:
print(e)
def source(**kwargs):
kwargs['transform'] = {'resample':'heatmap',
'global_x_range' : [0, 10],
'global_y_range' : [0, 10],
'global_offset_x' : [0],
'global_offset_y' : [0],
'type' : 'ndarray',
}
kwargs['data'] = {'x': [0],
'y': [0],
'dw' : [10],
'dh' : [10],
}
return ServerDataSource(**kwargs)
def downsample(image, image_x_axis, image_y_axis,
x_bounds, y_bounds, x_resolution, y_resolution):
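    # Clip the requested bounds to the image axes, stride-subsample the
    # selected window down to roughly 3x the target resolution, then resize
    # to (x_resolution, y_resolution) and report the patch extent (dw, dh).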
x_resolution, y_resolution = int(round(x_resolution)), int(round(y_resolution))
x_bounds = [x_bounds.start, x_bounds.end]
y_bounds = [y_bounds.start, y_bounds.end]
x_bounds = np.searchsorted(image_x_axis, x_bounds)
y_bounds = np.searchsorted(image_y_axis, y_bounds)
#y_bounds = image.shape[0] + 1 - y_bounds[::-1]
if x_resolution == 0 or y_resolution == 0:
subset = np.zeros((1,1), dtype=image.dtype)
else:
subset = image[y_bounds[0]:y_bounds[1],
x_bounds[0]:x_bounds[1]]
x_downsample_factor = max(round(subset.shape[1] / x_resolution / 3.), 1)
y_downsample_factor = max(round(subset.shape[0] / y_resolution / 3.), 1)
subset = subset[::x_downsample_factor, ::y_downsample_factor]
subset = scipy.misc.imresize(subset, (x_resolution, y_resolution),
interp='nearest')
bounds = image_x_axis[x_bounds[0]:x_bounds[1]]
dw = np.max(bounds) - np.min(bounds)
bounds = image_y_axis[y_bounds[0]:y_bounds[1]]
dh = np.max(bounds) - np.min(bounds)
return {'data' : {'image': [subset],
'x': [image_x_axis[x_bounds[0]]],
'y': [image_y_axis[y_bounds[0]]],
'dw': [dw],
'dh': [dh],
}
}
| bsd-3-clause |
serverdensity/sd-agent-core-plugins | kafka/test_kafka.py | 1 | 4269 | # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import threading
import time
from types import ListType
import unittest
import os
import mock
# 3p
from nose.plugins.attrib import attr
# project
from aggregator import MetricsAggregator
import logging
log = logging.getLogger('kafka_test')
STATSD_PORT = 8121
LOG_INFO = {
'log_to_event_viewer': False,
'log_to_syslog': False,
'syslog_host': None,
'syslog_port': None,
'log_level': logging.INFO,
'disable_file_logging': True,
'collector_log_file': '/tmp/collector.log',
'forwarder_log_file': '/tmp/forwarder.log',
'dogstatsd_log_file': '/tmp/dogstatsd.log',
'jmxfetch_log_file': '/tmp/sd-agent/jmxfetch.log',
'go-metro_log_file': '/tmp/sd-agent/go-metro.log',
}
with mock.patch('config.get_logging_config', return_value=LOG_INFO):
from jmxfetch import JMXFetch
from dogstatsd import Server
class DummyReporter(threading.Thread):
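    # Polls the aggregator on a background thread and keeps the latest
    # non-empty flush in self.metrics so the test can wait for JMX data.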
def __init__(self, metrics_aggregator):
threading.Thread.__init__(self)
self.finished = threading.Event()
self.metrics_aggregator = metrics_aggregator
self.interval = 10
self.metrics = None
self.finished = False
self.start()
def run(self):
while not self.finished:
time.sleep(self.interval)
self.flush()
def flush(self):
metrics = self.metrics_aggregator.flush()
if metrics:
self.metrics = metrics
@attr(requires='kafka')
class TestKafka(unittest.TestCase):
"""Basic Test for kafka integration."""
def setUp(self):
aggregator = MetricsAggregator("test_host")
self.server = Server(aggregator, "localhost", STATSD_PORT)
self.reporter = DummyReporter(aggregator)
self.t1 = threading.Thread(target=self.server.start)
self.t1.start()
confd_path = os.path.join(os.path.dirname(__file__), 'ci/resources/')
self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT})
self.t2 = threading.Thread(target=self.jmx_daemon.run)
self.t2.start()
def tearDown(self):
self.server.stop()
self.reporter.finished = True
self.jmx_daemon.terminate()
def testCustomJMXMetric(self):
count = 0
while self.reporter.metrics is None:
time.sleep(1)
count += 1
if count > 25:
raise Exception("No metrics were received in 25 seconds")
metrics = self.reporter.metrics
# expected_tags = ['env:test', 'instance:kafka-172.17.0.1-9999', 'kafka:broker']
self.assertTrue(isinstance(metrics, ListType))
self.assertTrue(len(metrics) > 0)
log.info(metrics)
log.info(len(metrics))
self.assertTrue(
len([t for t in metrics if "jvm." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) >= 13, metrics)
self.assertTrue(
len([t for t in metrics if "kafka.request." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) == 12, metrics)
self.assertTrue(
len([t for t in metrics if "kafka.replication." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) == 6, metrics)
# CLIENT metrics.
# kafka.producer.request_latency_avg
self.assertTrue(
len([t for t in metrics if "kafka.producer." in t['metric'] and "instance:kafka-172.17.0.1-7777" in t['tags']]) == 1, metrics)
# kafka.consumer.fetch_rate, kafka.consumer.max_lag
self.assertTrue(
len([t for t in metrics if "kafka.consumer." in t['metric'] and "instance:kafka-172.17.0.1-7777" in t['tags']]) == 2, metrics)
# self.assertTrue(
# len([t for t in metrics if "kafka.follower." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) > 40, metrics)
# self.assertTrue(
# len([t for t in metrics if "kafka.net." in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) > 40, metrics)
# self.assertTrue(
# len([t for t in metrics if "kafka.messages_in" in t['metric'] and "instance:kafka-172.17.0.1-9999" in t['tags']]) > 40, metrics)
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/tests/test_cross_validation.py | 4 | 30858 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.fixes import unique
from sklearn import cross_validation as cval
from sklearn.base import BaseEstimator
from sklearn.datasets import make_regression
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import explained_variance_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import make_scorer
from sklearn.externals import six
from sklearn.linear_model import Ridge
from sklearn.svm import SVC
class MockListClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation.
Checks that GridSearchCV didn't convert X to array.
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
assert_true(isinstance(X, list))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
class MockClassifier(BaseEstimator):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0):
self.a = a
def fit(self, X, Y=None, sample_weight=None, class_prior=None):
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
return self
def predict(self, T):
return T.shape[0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train an test split cover all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that a all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
y = [3, 3, -1, -1, 2]
cv = cval.StratifiedKFold(y, 3)
# checking there was only one warning.
assert_equal(len(w), 1)
# checking it has the right type
assert_equal(w[0].category, Warning)
# checking it's the right warning. This might be a bad test since it's
# a characteristic of the code and not a behavior
assert_true("The least populated class" in str(w[0]))
# Check that despite the warning the folds are still computed even
        # though all the classes are not necessarily represented on each
        # side of every split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for train, test in cval.StratifiedKFold(labels, 5):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10, 2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89, 2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01, 2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
    # stratification is possible)
labels = [0] * 3 + [1] * 14
for skf in [cval.StratifiedKFold(labels[:i], 3) for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model w.r.t. the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(unique(y[train]), unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(unique(y[train], return_inverse=True)[1]) /
float(len(y[train])))
p_test = (np.bincount(unique(y[test], return_inverse=True)[1]) /
float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
@ignore_warnings
def test_stratified_shuffle_split_iter_no_indices():
y = np.asarray([0, 1, 2] * 10)
sss1 = cval.StratifiedShuffleSplit(y, indices=False, random_state=0)
train_mask, test_mask = next(iter(sss1))
sss2 = cval.StratifiedShuffleSplit(y, indices=True, random_state=0)
train_indices, test_indices = next(iter(sss2))
assert_array_equal(sorted(test_indices), np.where(test_mask)[0])
def test_leave_label_out_changing_labels():
"""Check that LeaveOneLabelOut and LeavePLabelOut work normally if
the labels variable is changed before calling __iter__"""
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X as list
clf = MockListClassifier()
scores = cval.cross_val_score(clf, X.tolist(), y)
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
score = cval.cross_val_score(clf, X, y, score_func=score_func)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = range(10)
split = cval.train_test_split(X, X_s, y)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# also test deprecated old way
with warnings.catch_warnings(record=True):
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
score_func=f1_score, cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
with warnings.catch_warnings(record=True):
ev_scores = cval.cross_val_score(reg, X, y, cv=5,
score_func=explained_variance_score)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, cv=cv, scoring="accuracy", labels=np.ones(y.size),
random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
scorer = make_scorer(fbeta_score, beta=2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, scoring=scorer, cv=cv, labels=np.ones(y.size),
random_state=0)
assert_almost_equal(score_label, .97, 2)
assert_almost_equal(pvalue_label, 0.01, 3)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(svm, X, y, cv=cv,
scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
# test with deprecated interface
with warnings.catch_warnings(record=True):
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, score_func=accuracy_score, cv=cv)
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_mask():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,
4, indices=False)
lpo = assert_warns(DeprecationWarning, cval.LeavePOut,
4, 2, indices=False)
kf = assert_warns(DeprecationWarning, cval.KFold,
4, 2, indices=False)
skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,
y, 2, indices=False)
lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,
labels, indices=False)
lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,
labels, 2, indices=False)
ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,
4, indices=False)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss]:
for train, test in cv:
assert_equal(np.asarray(train).dtype.kind, 'b')
assert_equal(np.asarray(train).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = assert_warns(DeprecationWarning, cval.LeaveOneOut,
4, indices=True)
lpo = assert_warns(DeprecationWarning, cval.LeavePOut,
4, 2, indices=True)
kf = assert_warns(DeprecationWarning, cval.KFold,
4, 2, indices=True)
skf = assert_warns(DeprecationWarning, cval.StratifiedKFold,
y, 2, indices=True)
lolo = assert_warns(DeprecationWarning, cval.LeaveOneLabelOut,
labels, indices=True)
lopo = assert_warns(DeprecationWarning, cval.LeavePLabelOut,
labels, 2, indices=True)
b = cval.Bootstrap(2) # only in index mode
ss = assert_warns(DeprecationWarning, cval.ShuffleSplit,
2, indices=True)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
b = cval.Bootstrap(2) # only in index mode
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, b, ss]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
@ignore_warnings
def test_cross_val_generator_mask_indices_same():
# Test that the cross validation generators return the same results when
# indices=True and when indices=False
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
labels = np.array([1, 1, 2, 3, 3, 3, 4])
loo_mask = cval.LeaveOneOut(5, indices=False)
loo_ind = cval.LeaveOneOut(5, indices=True)
lpo_mask = cval.LeavePOut(10, 2, indices=False)
lpo_ind = cval.LeavePOut(10, 2, indices=True)
kf_mask = cval.KFold(10, 5, indices=False, shuffle=True, random_state=1)
kf_ind = cval.KFold(10, 5, indices=True, shuffle=True, random_state=1)
skf_mask = cval.StratifiedKFold(y, 3, indices=False)
skf_ind = cval.StratifiedKFold(y, 3, indices=True)
lolo_mask = cval.LeaveOneLabelOut(labels, indices=False)
lolo_ind = cval.LeaveOneLabelOut(labels, indices=True)
lopo_mask = cval.LeavePLabelOut(labels, 2, indices=False)
lopo_ind = cval.LeavePLabelOut(labels, 2, indices=True)
for cv_mask, cv_ind in [(loo_mask, loo_ind), (lpo_mask, lpo_ind),
(kf_mask, kf_ind), (skf_mask, skf_ind),
(lolo_mask, lolo_ind), (lopo_mask, lopo_ind)]:
for (train_mask, test_mask), (train_ind, test_ind) in \
zip(cv_mask, cv_ind):
assert_array_equal(np.where(train_mask)[0], train_ind)
assert_array_equal(np.where(test_mask)[0], test_ind)
def test_bootstrap_errors():
assert_raises(ValueError, cval.Bootstrap, 10, train_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=100)
assert_raises(ValueError, cval.Bootstrap, 10, train_size=1.1)
assert_raises(ValueError, cval.Bootstrap, 10, test_size=1.1)
def test_bootstrap_test_sizes():
assert_equal(cval.Bootstrap(10, test_size=0.2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=2).test_size, 2)
assert_equal(cval.Bootstrap(10, test_size=None).test_size, 5)
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
@ignore_warnings
def test_cross_indices_exception():
X = coo_matrix(np.array([[1, 2], [3, 4], [5, 6], [7, 8]]))
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4, indices=False)
lpo = cval.LeavePOut(4, 2, indices=False)
kf = cval.KFold(4, 2, indices=False)
skf = cval.StratifiedKFold(y, 2, indices=False)
lolo = cval.LeaveOneLabelOut(labels, indices=False)
lopo = cval.LeavePLabelOut(labels, 2, indices=False)
assert_raises(ValueError, cval.check_cv, loo, X, y)
assert_raises(ValueError, cval.check_cv, lpo, X, y)
assert_raises(ValueError, cval.check_cv, kf, X, y)
assert_raises(ValueError, cval.check_cv, skf, X, y)
assert_raises(ValueError, cval.check_cv, lolo, X, y)
assert_raises(ValueError, cval.check_cv, lopo, X, y)
| bsd-3-clause |
0-wiz-0/bgfx | 3rdparty/scintilla/scripts/GenerateCaseConvert.py | 71 | 4580 | # Script to generate CaseConvert.cxx from Python's Unicode data
# Should be run rarely when a Python with a new version of Unicode data is available.
# Requires Python 3.3 or later
# Should not be run with old versions of Python.
# Current best approach divides case conversions into two cases:
# simple symmetric and complex.
# Simple symmetric is where a lower and upper case pair convert to each
# other and the folded form is the same as the lower case.
# There are 1006 symmetric pairs.
# These are further divided into ranges (stored as lower, upper, range length,
# range pitch and singletons (stored as lower, upper).
# Complex is for cases that don't fit the above: where there are multiple
# characters in one of the forms or fold is different to lower or
# lower(upper(x)) or upper(lower(x)) are not x. These are represented as UTF-8
# strings with original, folded, upper, and lower separated by '|'.
# There are 126 complex cases.
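# For example, the ASCII letters form a single symmetric range entry
# (0x61, 0x41, 26, 1): lower 'a', upper 'A', 26 code points, pitch 1.
# A complex entry such as U+00DF (sharp s) is stored as "ß|ss|SS||": the
# fold 'ss' and upper 'SS' are multi-character, and the lower field is
# empty because it equals the original.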
import codecs, itertools, os, string, sys, unicodedata
from FileGenerator import Regenerate
def contiguousRanges(l, diff):
    # l is a list of lists
    # group into sublists where the first element of consecutive items
    # differs by diff
out = [[l[0]]]
for s in l[1:]:
if s[0] != out[-1][-1][0] + diff:
out.append([])
out[-1].append(s)
return out
def flatten(listOfLists):
"Flatten one level of nesting"
return itertools.chain.from_iterable(listOfLists)
def conversionSets():
# For all Unicode characters, see whether they have case conversions
# Return 2 sets: one of simple symmetric conversion cases and another
# with complex cases.
complexes = []
symmetrics = []
for ch in range(sys.maxunicode):
if ch >= 0xd800 and ch <= 0xDBFF:
continue
if ch >= 0xdc00 and ch <= 0xDFFF:
continue
uch = chr(ch)
fold = uch.casefold()
upper = uch.upper()
lower = uch.lower()
symmetric = False
if uch != upper and len(upper) == 1 and uch == lower and uch == fold:
lowerUpper = upper.lower()
foldUpper = upper.casefold()
if lowerUpper == foldUpper and lowerUpper == uch:
symmetric = True
symmetrics.append((ch, ord(upper), ch - ord(upper)))
if uch != lower and len(lower) == 1 and uch == upper and lower == fold:
upperLower = lower.upper()
if upperLower == uch:
symmetric = True
if fold == uch:
fold = ""
if upper == uch:
upper = ""
if lower == uch:
lower = ""
if (fold or upper or lower) and not symmetric:
complexes.append((uch, fold, upper, lower))
return symmetrics, complexes
def groupRanges(symmetrics):
# Group the symmetrics into groups where possible, returning a list
# of ranges and a list of symmetrics that didn't fit into a range
def distance(s):
return s[2]
groups = []
uniquekeys = []
for k, g in itertools.groupby(symmetrics, distance):
groups.append(list(g)) # Store group iterator as a list
uniquekeys.append(k)
contiguousGroups = flatten([contiguousRanges(g, 1) for g in groups])
longGroups = [(x[0][0], x[0][1], len(x), 1) for x in contiguousGroups if len(x) > 4]
oneDiffs = [s for s in symmetrics if s[2] == 1]
contiguousOnes = flatten([contiguousRanges(g, 2) for g in [oneDiffs]])
longOneGroups = [(x[0][0], x[0][1], len(x), 2) for x in contiguousOnes if len(x) > 4]
rangeGroups = sorted(longGroups+longOneGroups, key=lambda s: s[0])
rangeCoverage = list(flatten([range(r[0], r[0]+r[2]*r[3], r[3]) for r in rangeGroups]))
nonRanges = [(l, u) for l, u, d in symmetrics if l not in rangeCoverage]
return rangeGroups, nonRanges
def escape(s):
return "".join((chr(c) if chr(c) in string.ascii_letters else "\\x%x" % c) for c in s.encode('utf-8'))
def updateCaseConvert():
symmetrics, complexes = conversionSets()
rangeGroups, nonRanges = groupRanges(symmetrics)
print(len(rangeGroups), "ranges")
rangeLines = ["%d,%d,%d,%d, " % x for x in rangeGroups]
print(len(nonRanges), "non ranges")
nonRangeLines = ["%d,%d, " % x for x in nonRanges]
print(len(symmetrics), "symmetric")
complexLines = ['"%s|%s|%s|%s|"' % tuple(escape(t) for t in x) for x in complexes]
print(len(complexLines), "complex")
Regenerate("../src/CaseConvert.cxx", "//", rangeLines, nonRangeLines, complexLines)
updateCaseConvert()
| bsd-2-clause |
sfluo/Mr.Bot | crypto/pycrypto-2.6/lib/Crypto/Cipher/blockalgo.py | 133 | 12596 | # -*- coding: utf-8 -*-
#
# Cipher/blockalgo.py
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Module with definitions common to all block ciphers."""
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
from Crypto.Util.py3compat import *
#: *Electronic Code Book (ECB)*.
#: This is the simplest encryption mode. Each of the plaintext blocks
#: is directly encrypted into a ciphertext block, independently of
#: any other block. This mode exposes frequency of symbols
#: in your plaintext. Other modes (e.g. *CBC*) should be used instead.
#:
#: See `NIST SP800-38A`_ , Section 6.1 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_ECB = 1
#: *Cipher-Block Chaining (CBC)*. Each of the ciphertext blocks depends
#: on the current and all previous plaintext blocks. An Initialization Vector
#: (*IV*) is required.
#:
#: The *IV* is a data block to be transmitted to the receiver.
#: The *IV* can be made public, but it must be authenticated by the receiver and
#: it should be picked randomly.
#:
#: See `NIST SP800-38A`_ , Section 6.2 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_CBC = 2
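# A minimal CBC usage sketch (the key and IV below are placeholder values;
# real code must use a secret key, a random IV and proper padding):
#
#     >>> from Crypto.Cipher import AES
#     >>> cipher = AES.new(b'0' * 16, AES.MODE_CBC, b'\x00' * 16)
#     >>> ct = cipher.encrypt(b'sixteen byte msg')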
#: *Cipher FeedBack (CFB)*. This mode is similar to CBC, but it transforms
#: the underlying block cipher into a stream cipher. Plaintext and ciphertext
#: are processed in *segments* of **s** bits. The mode is therefore sometimes
#: labelled **s**-bit CFB. An Initialization Vector (*IV*) is required.
#:
#: When encrypting, each ciphertext segment contributes to the encryption of
#: the next plaintext segment.
#:
#: This *IV* is a data block to be transmitted to the receiver.
#: The *IV* can be made public, but it should be picked randomly.
#: Reusing the same *IV* for encryptions done with the same key leads to
#: catastrophic cryptographic failures.
#:
#: See `NIST SP800-38A`_ , Section 6.3 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_CFB = 3
#: This mode should not be used.
MODE_PGP = 4
#: *Output FeedBack (OFB)*. This mode is very similar to CBC, but it
#: transforms the underlying block cipher into a stream cipher.
#: The keystream is the iterated block encryption of an Initialization Vector (*IV*).
#:
#: The *IV* is a data block to be transmitted to the receiver.
#: The *IV* can be made public, but it should be picked randomly.
#:
#: Reusing the same *IV* for encryptions done with the same key leads to
#: catastrophic cryptographic failures.
#:
#: See `NIST SP800-38A`_ , Section 6.4 .
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_OFB = 5
#: *CounTeR (CTR)*. This mode is very similar to ECB, in that
#: encryption of one block is done independently of all other blocks.
#: Unlike ECB, the block *position* contributes to the encryption and no
#: information leaks about symbol frequency.
#:
#: Each message block is associated to a *counter* which must be unique
#: across all messages that get encrypted with the same key (not just within
#: the same message). The counter is as big as the block size.
#:
#: Counters can be generated in several ways. The most straightforward one is
#: to choose an *initial counter block* (which can be made public, similarly
#: to the *IV* for the other modes) and increment its lowest **m** bits by
#: one (modulo *2^m*) for each block. In most cases, **m** is chosen to be half
#: the block size.
#:
#: Reusing the same *initial counter block* for encryptions done with the same
#: key leads to catastrophic cryptographic failures.
#:
#: See `NIST SP800-38A`_ , Section 6.5 (for the mode) and Appendix B (for how
#: to manage the *initial counter block*).
#:
#: .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
MODE_CTR = 6
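# A minimal CTR usage sketch (the zero prefix stands in for a per-message
# nonce and the key is a placeholder):
#
#     >>> from Crypto.Cipher import AES
#     >>> from Crypto.Util import Counter
#     >>> ctr = Counter.new(64, prefix=b'\x00' * 8)
#     >>> cipher = AES.new(b'0' * 16, AES.MODE_CTR, counter=ctr)
#     >>> ct = cipher.encrypt(b'plaintext of any length')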
#: OpenPGP. This mode is a variant of CFB, and it is only used in PGP and OpenPGP_ applications.
#: An Initialization Vector (*IV*) is required.
#:
#: Unlike CFB, the IV is not transmitted to the receiver. Instead, the *encrypted* IV is.
#: The IV is a random data block. Two of its bytes are duplicated to act as a checksum
#: for the correctness of the key. The encrypted IV is therefore 2 bytes longer than
#: the clean IV.
#:
#: .. _OpenPGP: http://tools.ietf.org/html/rfc4880
MODE_OPENPGP = 7
def _getParameter(name, index, args, kwargs, default=None):
"""Find a parameter in tuple and dictionary arguments a function receives"""
param = kwargs.get(name)
if len(args)>index:
if param:
raise ValueError("Parameter '%s' is specified twice" % name)
param = args[index]
return param or default
class BlockAlgo:
"""Class modelling an abstract block cipher."""
def __init__(self, factory, key, *args, **kwargs):
self.mode = _getParameter('mode', 0, args, kwargs, default=MODE_ECB)
self.block_size = factory.block_size
if self.mode != MODE_OPENPGP:
self._cipher = factory.new(key, *args, **kwargs)
self.IV = self._cipher.IV
else:
            # OPENPGP mode. For details, see section 13.9 of RFC 4880.
#
# A few members are specifically created for this mode:
# - _encrypted_iv, set in this constructor
# - _done_first_block, set to True after the first encryption
# - _done_last_block, set to True after a partial block is processed
self._done_first_block = False
self._done_last_block = False
self.IV = _getParameter('iv', 1, args, kwargs)
if not self.IV:
raise ValueError("MODE_OPENPGP requires an IV")
# Instantiate a temporary cipher to process the IV
IV_cipher = factory.new(key, MODE_CFB,
b('\x00')*self.block_size, # IV for CFB
segment_size=self.block_size*8)
# The cipher will be used for...
if len(self.IV) == self.block_size:
# ... encryption
self._encrypted_IV = IV_cipher.encrypt(
self.IV + self.IV[-2:] + # Plaintext
b('\x00')*(self.block_size-2) # Padding
)[:self.block_size+2]
elif len(self.IV) == self.block_size+2:
# ... decryption
self._encrypted_IV = self.IV
self.IV = IV_cipher.decrypt(self.IV + # Ciphertext
b('\x00')*(self.block_size-2) # Padding
)[:self.block_size+2]
if self.IV[-2:] != self.IV[-4:-2]:
raise ValueError("Failed integrity check for OPENPGP IV")
self.IV = self.IV[:-2]
else:
raise ValueError("Length of IV must be %d or %d bytes for MODE_OPENPGP"
% (self.block_size, self.block_size+2))
# Instantiate the cipher for the real PGP data
self._cipher = factory.new(key, MODE_CFB,
self._encrypted_IV[-self.block_size:],
segment_size=self.block_size*8)
def encrypt(self, plaintext):
"""Encrypt data with the key and the parameters set at initialization.
The cipher object is stateful; encryption of a long block
of data can be broken up in two or more calls to `encrypt()`.
That is, the statement:
>>> c.encrypt(a) + c.encrypt(b)
is always equivalent to:
>>> c.encrypt(a+b)
That also means that you cannot reuse an object for encrypting
or decrypting other data with the same key.
This function does not perform any padding.
- For `MODE_ECB`, `MODE_CBC`, and `MODE_OFB`, *plaintext* length
(in bytes) must be a multiple of *block_size*.
- For `MODE_CFB`, *plaintext* length (in bytes) must be a multiple
of *segment_size*/8.
- For `MODE_CTR`, *plaintext* can be of any length.
- For `MODE_OPENPGP`, *plaintext* must be a multiple of *block_size*,
unless it is the last chunk of the message.
:Parameters:
plaintext : byte string
The piece of data to encrypt.
:Return:
the encrypted data, as a byte string. It is as long as
*plaintext* with one exception: when encrypting the first message
          chunk with `MODE_OPENPGP`, the encrypted IV is prepended to the
returned ciphertext.
"""
if self.mode == MODE_OPENPGP:
padding_length = (self.block_size - len(plaintext) % self.block_size) % self.block_size
if padding_length>0:
# CFB mode requires ciphertext to have length multiple of block size,
# but PGP mode allows the last block to be shorter
if self._done_last_block:
raise ValueError("Only the last chunk is allowed to have length not multiple of %d bytes",
self.block_size)
self._done_last_block = True
padded = plaintext + b('\x00')*padding_length
res = self._cipher.encrypt(padded)[:len(plaintext)]
else:
res = self._cipher.encrypt(plaintext)
if not self._done_first_block:
res = self._encrypted_IV + res
self._done_first_block = True
return res
return self._cipher.encrypt(plaintext)
def decrypt(self, ciphertext):
"""Decrypt data with the key and the parameters set at initialization.
The cipher object is stateful; decryption of a long block
of data can be broken up in two or more calls to `decrypt()`.
That is, the statement:
>>> c.decrypt(a) + c.decrypt(b)
is always equivalent to:
>>> c.decrypt(a+b)
That also means that you cannot reuse an object for encrypting
or decrypting other data with the same key.
This function does not perform any padding.
- For `MODE_ECB`, `MODE_CBC`, and `MODE_OFB`, *ciphertext* length
(in bytes) must be a multiple of *block_size*.
- For `MODE_CFB`, *ciphertext* length (in bytes) must be a multiple
of *segment_size*/8.
- For `MODE_CTR`, *ciphertext* can be of any length.
        - For `MODE_OPENPGP`, *ciphertext* must be a multiple of *block_size*,
unless it is the last chunk of the message.
:Parameters:
ciphertext : byte string
The piece of data to decrypt.
:Return: the decrypted data (byte string, as long as *ciphertext*).
"""
if self.mode == MODE_OPENPGP:
padding_length = (self.block_size - len(ciphertext) % self.block_size) % self.block_size
if padding_length>0:
# CFB mode requires ciphertext to have length multiple of block size,
# but PGP mode allows the last block to be shorter
if self._done_last_block:
raise ValueError("Only the last chunk is allowed to have length not multiple of %d bytes",
self.block_size)
self._done_last_block = True
padded = ciphertext + b('\x00')*padding_length
res = self._cipher.decrypt(padded)[:len(ciphertext)]
else:
res = self._cipher.decrypt(ciphertext)
return res
return self._cipher.decrypt(ciphertext)
| bsd-3-clause |
cybertk/mffr | chromium_tools/mffr.py | 1 | 6105 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: mffr.py [-d] [-g *.h] [-g *.cc] REGEXP REPLACEMENT
This tool performs a fast find-and-replace operation on files in
the current git repository.
The -d flag selects a default set of globs (C++ and Objective-C/C++
source files). The -g flag adds a single glob to the list and may
be used multiple times. If neither -d nor -g is specified, the tool
searches all files (*.*).
REGEXP uses full Python regexp syntax. REPLACEMENT can use
back-references.
"""
__version__ = '1.0.0'
import optparse
import re
import subprocess
import sys
# We need to use shell=True with subprocess on Windows so that it
# finds 'git' from the path, but can lead to undesired behavior on
# Linux.
_USE_SHELL = (sys.platform == 'win32')
def MultiFileFindReplace(original, replacement, file_globs):
"""Implements fast multi-file find and replace.
Given an |original| string and a |replacement| string, find matching
files by running git grep on |original| in files matching any
pattern in |file_globs|.
Once files are found, |re.sub| is run to replace |original| with
|replacement|. |replacement| may use capture group back-references.
Args:
original: '(#(include|import)\s*["<])chrome/browser/ui/browser.h([>"])'
replacement: '\1chrome/browser/ui/browser/browser.h\3'
file_globs: ['*.cc', '*.h', '*.m', '*.mm']
Returns the list of files modified.
Raises an exception on error.
"""
# Posix extended regular expressions do not reliably support the "\s"
# shorthand.
posix_ere_original = re.sub(r"\\s", "[[:space:]]", original)
if sys.platform == 'win32':
posix_ere_original = posix_ere_original.replace('"', '""')
out, err = subprocess.Popen(
['git', 'grep', '-E', '--name-only', posix_ere_original,
'--'] + file_globs,
stdout=subprocess.PIPE,
shell=_USE_SHELL).communicate()
referees = out.splitlines()
for referee in referees:
with open(referee) as f:
original_contents = f.read()
contents = re.sub(original, replacement, original_contents)
if contents == original_contents:
raise Exception('No change in file %s although matched in grep' %
referee)
with open(referee, 'wb') as f:
f.write(contents)
return referees
def main():
parser = optparse.OptionParser(usage='''
(1) %prog <options> REGEXP REPLACEMENT
REGEXP uses full Python regexp syntax. REPLACEMENT can use back-references.
(2) %prog <options> -i <file>
<file> should contain a list (in Python syntax) of
[REGEXP, REPLACEMENT, [GLOBS]] lists, e.g.:
[
[r"(foo|bar)", r"\1baz", ["*.cc", "*.h"]],
["54", "42"],
]
As shown above, [GLOBS] can be omitted for a given search-replace list, in which
case the corresponding search-replace will use the globs specified on the
command line.''',
version='%prog ' + __version__)
parser.add_option('-d', action='store_true',
dest='use_default_glob',
help='Perform the change on C++ and Objective-C(++) source '
'and header files.')
parser.add_option('-f', action='store_true',
dest='force_unsafe_run',
help='Perform the run even if there are uncommitted local '
'changes.')
parser.add_option('-g', action='append',
type='string',
default=[],
metavar="<glob>",
dest='user_supplied_globs',
help='Perform the change on the specified glob. Can be '
'specified multiple times, in which case the globs are '
'unioned.')
parser.add_option('-i', "--input_file",
type='string',
action='store',
default='',
metavar="<file>",
dest='input_filename',
help='Read arguments from <file> rather than the command '
'line. NOTE: To be sure of regular expressions being '
'interpreted correctly, use raw strings.')
opts, args = parser.parse_args()
if opts.use_default_glob and opts.user_supplied_globs:
print '"-d" and "-g" cannot be used together'
parser.print_help()
return 1
from_file = opts.input_filename != ""
if (from_file and len(args) != 0) or (not from_file and len(args) != 2):
parser.print_help()
return 1
if not opts.force_unsafe_run:
out, err = subprocess.Popen(['git', 'status', '--porcelain'],
stdout=subprocess.PIPE,
shell=_USE_SHELL).communicate()
if out:
print 'ERROR: This tool does not print any confirmation prompts,'
print 'so you should only run it with a clean staging area and cache'
print 'so that reverting a bad find/replace is as easy as running'
print ' git checkout -- .'
print ''
print 'To override this safeguard, pass the -f flag.'
return 1
global_file_globs = ['*.*']
if opts.use_default_glob:
global_file_globs = ['*.cc', '*.h', '*.m', '*.mm']
elif opts.user_supplied_globs:
global_file_globs = opts.user_supplied_globs
# Construct list of search-replace tasks.
search_replace_tasks = []
if opts.input_filename == '':
original = args[0]
replacement = args[1]
search_replace_tasks.append([original, replacement, global_file_globs])
else:
f = open(opts.input_filename)
search_replace_tasks = eval("".join(f.readlines()))
for task in search_replace_tasks:
if len(task) == 2:
task.append(global_file_globs)
f.close()
for (original, replacement, file_globs) in search_replace_tasks:
print 'File globs: %s' % file_globs
print 'Original: %s' % original
print 'Replacement: %s' % replacement
MultiFileFindReplace(original, replacement, file_globs)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
gmt/portage | pym/portage/tests/sync/test_sync_local.py | 2 | 7630 | # Copyright 2014-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import subprocess
import sys
import textwrap
import time
import portage
from portage import os, shutil, _shell_quote
from portage import _unicode_decode
from portage.const import PORTAGE_PYM_PATH, TIMESTAMP_FORMAT
from portage.process import find_binary
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
from portage.util import ensure_dirs
class SyncLocalTestCase(TestCase):
"""
Test sync with rsync and git, using file:// sync-uri.
"""
def _must_skip(self):
if find_binary("rsync") is None:
return "rsync: command not found"
if find_binary("git") is None:
return "git: command not found"
def testSyncLocal(self):
debug = False
skip_reason = self._must_skip()
if skip_reason:
self.portage_skip = skip_reason
self.assertFalse(True, skip_reason)
return
repos_conf = textwrap.dedent("""
[DEFAULT]
%(default_keys)s
[test_repo]
location = %(EPREFIX)s/var/repositories/test_repo
sync-type = %(sync-type)s
sync-uri = file:/%(EPREFIX)s/var/repositories/test_repo_sync
auto-sync = yes
%(repo_extra_keys)s
""")
profile = {
"eapi": ("5",),
"package.use.stable.mask": ("dev-libs/A flag",)
}
ebuilds = {
"dev-libs/A-0": {}
}
user_config = {
'make.conf': ('FEATURES="metadata-transfer"',)
}
playground = ResolverPlayground(ebuilds=ebuilds,
profile=profile, user_config=user_config, debug=debug)
settings = playground.settings
eprefix = settings["EPREFIX"]
eroot = settings["EROOT"]
homedir = os.path.join(eroot, "home")
distdir = os.path.join(eprefix, "distdir")
repo = settings.repositories["test_repo"]
metadata_dir = os.path.join(repo.location, "metadata")
cmds = {}
for cmd in ("emerge", "emaint"):
cmds[cmd] = (portage._python_interpreter,
"-b", "-Wd", os.path.join(self.bindir, cmd))
git_binary = find_binary("git")
git_cmd = (git_binary,)
committer_name = "Gentoo Dev"
committer_email = "[email protected]"
def repos_set_conf(sync_type, dflt_keys=None, xtra_keys=None):
env["PORTAGE_REPOSITORIES"] = repos_conf % {\
"EPREFIX": eprefix, "sync-type": sync_type,
"default_keys": "" if dflt_keys is None else dflt_keys,
"repo_extra_keys": "" if xtra_keys is None else xtra_keys}
def alter_ebuild():
with open(os.path.join(repo.location + "_sync",
"dev-libs", "A", "A-0.ebuild"), "a") as f:
f.write("\n")
os.unlink(os.path.join(metadata_dir, 'timestamp.chk'))
sync_cmds = (
(homedir, cmds["emerge"] + ("--sync",)),
(homedir, lambda: self.assertTrue(os.path.exists(
os.path.join(repo.location, "dev-libs", "A")
), "dev-libs/A expected, but missing")),
(homedir, cmds["emaint"] + ("sync", "-A")),
)
rename_repo = (
(homedir, lambda: os.rename(repo.location,
repo.location + "_sync")),
)
rsync_opts_repos = (
(homedir, alter_ebuild),
(homedir, lambda: repos_set_conf("rsync", None,
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location + "_back"))),
(homedir, cmds['emerge'] + ("--sync",)),
(homedir, lambda: self.assertTrue(os.path.exists(
repo.location + "_back"))),
(homedir, lambda: shutil.rmtree(repo.location + "_back")),
(homedir, lambda: repos_set_conf("rsync")),
)
rsync_opts_repos_default = (
(homedir, alter_ebuild),
(homedir, lambda: repos_set_conf("rsync",
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location+"_back"))),
(homedir, cmds['emerge'] + ("--sync",)),
(homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
(homedir, lambda: shutil.rmtree(repo.location + "_back")),
(homedir, lambda: repos_set_conf("rsync")),
)
rsync_opts_repos_default_ovr = (
(homedir, alter_ebuild),
(homedir, lambda: repos_set_conf("rsync",
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location + "_back_nowhere"),
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location + "_back"))),
(homedir, cmds['emerge'] + ("--sync",)),
(homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
(homedir, lambda: shutil.rmtree(repo.location + "_back")),
(homedir, lambda: repos_set_conf("rsync")),
)
rsync_opts_repos_default_cancel = (
(homedir, alter_ebuild),
(homedir, lambda: repos_set_conf("rsync",
"sync-rsync-extra-opts = --backup --backup-dir=%s" %
_shell_quote(repo.location + "_back_nowhere"),
"sync-rsync-extra-opts = ")),
(homedir, cmds['emerge'] + ("--sync",)),
(homedir, lambda: self.assertFalse(os.path.exists(repo.location + "_back"))),
(homedir, lambda: repos_set_conf("rsync")),
)
delete_sync_repo = (
(homedir, lambda: shutil.rmtree(
repo.location + "_sync")),
)
git_repo_create = (
(repo.location, git_cmd +
("config", "--global", "user.name", committer_name,)),
(repo.location, git_cmd +
("config", "--global", "user.email", committer_email,)),
(repo.location, git_cmd + ("init-db",)),
(repo.location, git_cmd + ("add", ".")),
(repo.location, git_cmd +
("commit", "-a", "-m", "add whole repo")),
)
sync_type_git = (
(homedir, lambda: repos_set_conf("git")),
)
pythonpath = os.environ.get("PYTHONPATH")
if pythonpath is not None and not pythonpath.strip():
pythonpath = None
if pythonpath is not None and \
pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
pass
else:
if pythonpath is None:
pythonpath = ""
else:
pythonpath = ":" + pythonpath
pythonpath = PORTAGE_PYM_PATH + pythonpath
env = {
"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
"DISTDIR" : distdir,
"GENTOO_COMMITTER_NAME" : committer_name,
"GENTOO_COMMITTER_EMAIL" : committer_email,
"HOME" : homedir,
"PATH" : os.environ["PATH"],
"PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"],
"PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"],
"PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
"PYTHONPATH" : pythonpath,
}
repos_set_conf("rsync")
if os.environ.get("SANDBOX_ON") == "1":
# avoid problems from nested sandbox instances
env["FEATURES"] = "-sandbox -usersandbox"
dirs = [homedir, metadata_dir]
try:
for d in dirs:
ensure_dirs(d)
timestamp_path = os.path.join(metadata_dir, 'timestamp.chk')
with open(timestamp_path, 'w') as f:
f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT, time.gmtime()))
if debug:
# The subprocess inherits both stdout and stderr, for
# debugging purposes.
stdout = None
else:
# The subprocess inherits stderr so that any warnings
# triggered by python -Wd will be visible.
stdout = subprocess.PIPE
for cwd, cmd in rename_repo + sync_cmds + \
rsync_opts_repos + rsync_opts_repos_default + \
rsync_opts_repos_default_ovr + rsync_opts_repos_default_cancel + \
delete_sync_repo + git_repo_create + sync_type_git + \
rename_repo + sync_cmds:
if hasattr(cmd, '__call__'):
cmd()
continue
abs_cwd = os.path.join(repo.location, cwd)
proc = subprocess.Popen(cmd,
cwd=abs_cwd, env=env, stdout=stdout)
if debug:
proc.wait()
else:
output = proc.stdout.readlines()
proc.wait()
proc.stdout.close()
if proc.returncode != os.EX_OK:
for line in output:
sys.stderr.write(_unicode_decode(line))
self.assertEqual(os.EX_OK, proc.returncode,
"%s failed in %s" % (cmd, cwd,))
finally:
playground.cleanup()
| gpl-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/django/contrib/webdesign/lorem_ipsum.py | 439 | 4872 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [u' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = u', '.join(sections)
# Convert to sentence case and add end punctuation.
return u'%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return u' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return u' '.join(word_list)
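# Hedged usage sketch, not part of the original module: exercising the three
# public helpers. Output is random except for the standard prefixes produced
# by the `common` defaults.
if __name__ == '__main__':
    print words(5)                      # first five standard lorem ipsum words
    print sentence()                    # one random sentence
    print u'\n\n'.join(paragraphs(2))   # standard paragraph plus a random one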
| apache-2.0 |
alangwansui/mtl_ordercenter | openerp/addons/base/ir/osv_memory_autovacuum.py | 447 | 1450 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
class osv_memory_autovacuum(openerp.osv.osv.osv_memory):
""" Expose the osv_memory.vacuum() method to the cron jobs mechanism. """
_name = 'osv_memory.autovacuum'
def power_on(self, cr, uid, context=None):
for model in self.pool.models.values():
if model.is_transient():
model._transient_vacuum(cr, uid, force=True)
return True
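# Hedged usage sketch, not part of the original module: power_on() is meant to
# be triggered periodically; an ir.cron record along these (illustrative)
# lines would schedule it:
#
#   <record id="autovacuum_job" model="ir.cron">
#       <field name="name">Auto-vacuum osv_memory objects</field>
#       <field name="model">osv_memory.autovacuum</field>
#       <field name="function">power_on</field>
#       <field name="interval_number">30</field>
#       <field name="interval_type">minutes</field>
#   </record>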
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gminds/rapidnewsng | django/contrib/gis/tests/test_geoforms.py | 110 | 3800 | from django.forms import ValidationError
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.tests.utils import HAS_SPATIALREFSYS
from django.utils import unittest
if HAS_SPATIALREFSYS:
from django.contrib.gis import forms
from django.contrib.gis.geos import GEOSGeometry
@unittest.skipUnless(HAS_GDAL and HAS_SPATIALREFSYS, "GeometryFieldTest needs gdal support and a spatial database")
class GeometryFieldTest(unittest.TestCase):
def test00_init(self):
"Testing GeometryField initialization with defaults."
fld = forms.GeometryField()
for bad_default in ('blah', 3, 'FoO', None, 0):
self.assertRaises(ValidationError, fld.clean, bad_default)
def test01_srid(self):
"Testing GeometryField with a SRID set."
# Input that doesn't specify the SRID is assumed to be in the SRID
# of the input field.
fld = forms.GeometryField(srid=4326)
geom = fld.clean('POINT(5 23)')
self.assertEqual(4326, geom.srid)
# Making the field in a different SRID from that of the geometry, and
# asserting it transforms.
fld = forms.GeometryField(srid=32140)
tol = 0.0000001
xform_geom = GEOSGeometry('POINT (951640.547328465 4219369.26171664)', srid=32140)
# The cleaned geometry should be transformed to 32140.
cleaned_geom = fld.clean('SRID=4326;POINT (-95.363151 29.763374)')
self.assertTrue(xform_geom.equals_exact(cleaned_geom, tol))
def test02_null(self):
"Testing GeometryField's handling of null (None) geometries."
# Form fields, by default, are required (`required=True`)
fld = forms.GeometryField()
self.assertRaises(forms.ValidationError, fld.clean, None)
# Still not allowed if `null=False`.
fld = forms.GeometryField(required=False, null=False)
self.assertRaises(forms.ValidationError, fld.clean, None)
# This will clean None as a geometry (See #10660).
fld = forms.GeometryField(required=False)
self.assertEqual(None, fld.clean(None))
def test03_geom_type(self):
"Testing GeometryField's handling of different geometry types."
# By default, all geometry types are allowed.
fld = forms.GeometryField()
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.clean(wkt))
pnt_fld = forms.GeometryField(geom_type='POINT')
self.assertEqual(GEOSGeometry('POINT(5 23)'), pnt_fld.clean('POINT(5 23)'))
# a WKT for any other geom_type will be properly transformed by `to_python`
self.assertEqual(GEOSGeometry('LINESTRING(0 0, 1 1)'), pnt_fld.to_python('LINESTRING(0 0, 1 1)'))
# but rejected by `clean`
self.assertRaises(forms.ValidationError, pnt_fld.clean, 'LINESTRING(0 0, 1 1)')
def test04_to_python(self):
"""
Testing to_python returns a correct GEOSGeometry object or
a ValidationError
"""
fld = forms.GeometryField()
# to_python returns the same GEOSGeometry for a WKT
for wkt in ('POINT(5 23)', 'MULTIPOLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'LINESTRING(0 0, 1 1)'):
self.assertEqual(GEOSGeometry(wkt), fld.to_python(wkt))
# but raises a ValidationError for any other string
for wkt in ('POINT(5)', 'MULTI POLYGON(((0 0, 0 1, 1 1, 1 0, 0 0)))', 'BLAH(0 0, 1 1)'):
self.assertRaises(forms.ValidationError, fld.to_python, wkt)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GeometryFieldTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
if __name__=="__main__":
run()
| bsd-3-clause |
svn2github/django | django/contrib/gis/geos/mutable_list.py | 405 | 10386 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
class ListMixin(object):
"""
A base class which provides complete list interface.
Derived classes must call ListMixin's __init__() function
and implement the following:
function _get_single_external(self, i):
Return single item with index i for general use.
The index i will always satisfy 0 <= i < len(self).
function _get_single_internal(self, i):
Same as above, but for use within the class [Optional]
        Note that if _get_single_internal and _get_single_external return
different types of objects, _set_list must distinguish
between the two and handle each appropriately.
function _set_list(self, length, items):
Recreate the entire object.
NOTE: items may be a generator which calls _get_single_internal.
Therefore, it is necessary to cache the values in a temporary:
temp = list(items)
before clobbering the original storage.
function _set_single(self, i, value):
Set the single item at index i to value [Optional]
If left undefined, all mutations will result in rebuilding
the object using _set_list.
function __len__(self):
Return the length
int _minlength:
The minimum legal length [Optional]
int _maxlength:
The maximum legal length [Optional]
type or tuple _allowed:
A type or tuple of allowed item types [Optional]
class _IndexError:
        The type of exception to be raised on an invalid index [Optional]
"""
_minlength = 0
_maxlength = None
_IndexError = IndexError
### Python initialization and special list interface methods ###
def __init__(self, *args, **kwargs):
if not hasattr(self, '_get_single_internal'):
self._get_single_internal = self._get_single_external
if not hasattr(self, '_set_single'):
self._set_single = self._set_single_rebuild
self._assign_extended_slice = self._assign_extended_slice_rebuild
super(ListMixin, self).__init__(*args, **kwargs)
def __getitem__(self, index):
"Get the item(s) at the specified index/slice."
if isinstance(index, slice):
return [self._get_single_external(i) for i in xrange(*index.indices(len(self)))]
else:
index = self._checkindex(index)
return self._get_single_external(index)
def __delitem__(self, index):
"Delete the item(s) at the specified index/slice."
if not isinstance(index, (int, long, slice)):
raise TypeError("%s is not a legal index" % index)
# calculate new length and dimensions
origLen = len(self)
if isinstance(index, (int, long)):
index = self._checkindex(index)
indexRange = [index]
else:
indexRange = range(*index.indices(origLen))
newLen = origLen - len(indexRange)
newItems = ( self._get_single_internal(i)
for i in xrange(origLen)
if i not in indexRange )
self._rebuild(newLen, newItems)
def __setitem__(self, index, val):
"Set the item(s) at the specified index/slice."
if isinstance(index, slice):
self._set_slice(index, val)
else:
index = self._checkindex(index)
self._check_allowed((val,))
self._set_single(index, val)
def __iter__(self):
"Iterate over the items in the list"
for i in xrange(len(self)):
yield self[i]
### Special methods for arithmetic operations ###
def __add__(self, other):
'add another list-like object'
return self.__class__(list(self) + list(other))
def __radd__(self, other):
'add to another list-like object'
return other.__class__(list(other) + list(self))
def __iadd__(self, other):
'add another list-like object to self'
self.extend(list(other))
return self
def __mul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __rmul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __imul__(self, n):
'multiply'
if n <= 0:
del self[:]
else:
cache = list(self)
for i in range(n-1):
self.extend(cache)
return self
def __cmp__(self, other):
'cmp'
slen = len(self)
for i in range(slen):
try:
c = cmp(self[i], other[i])
except IndexError:
# must be other is shorter
return 1
else:
# elements not equal
if c: return c
return cmp(slen, len(other))
### Public list interface Methods ###
## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i: count += 1
return count
def index(self, val):
"Standard list index method"
for i in xrange(0, len(self)):
if self[i] == val: return i
raise ValueError('%s not found in object' % str(val))
## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, (int, long)):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
def sort(self, cmp=cmp, key=None, reverse=False):
"Standard list sort method"
if key:
temp = [(key(v),v) for v in self]
temp.sort(cmp=cmp, key=lambda x: x[0], reverse=reverse)
self[:] = [v[1] for v in temp]
else:
temp = list(self)
temp.sort(cmp=cmp, reverse=reverse)
self[:] = temp
### Private routines ###
def _rebuild(self, newLen, newItems):
if newLen < self._minlength:
raise ValueError('Must have at least %d items' % self._minlength)
if self._maxlength is not None and newLen > self._maxlength:
raise ValueError('Cannot have more than %d items' % self._maxlength)
self._set_list(newLen, newItems)
def _set_single_rebuild(self, index, value):
self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise self._IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
def _set_slice(self, index, values):
"Assign values to a slice of the object"
try:
iter(values)
except TypeError:
raise TypeError('can only assign an iterable to a slice')
self._check_allowed(values)
origLen = len(self)
valueList = list(values)
start, stop, step = index.indices(origLen)
# CAREFUL: index.step and step are not the same!
# step will never be None
if index.step is None:
self._assign_simple_slice(start, stop, valueList)
else:
self._assign_extended_slice(start, stop, step, valueList)
def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
'Assign an extended slice by rebuilding entire list'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
# we're not changing the length of the sequence
newLen = len(self)
newVals = dict(zip(indexList, valueList))
def newItems():
for i in xrange(newLen):
if i in newVals:
yield newVals[i]
else:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
def _assign_extended_slice(self, start, stop, step, valueList):
'Assign an extended slice by re-assigning individual items'
indexList = range(start, stop, step)
# extended slice, only allow assigning slice of same size
if len(valueList) != len(indexList):
raise ValueError('attempt to assign sequence of size %d '
'to extended slice of size %d'
% (len(valueList), len(indexList)))
for i, val in zip(indexList, valueList):
self._set_single(i, val)
def _assign_simple_slice(self, start, stop, valueList):
'Assign a simple slice; Can assign slice of any length'
origLen = len(self)
stop = max(start, stop)
newLen = origLen - stop + start + len(valueList)
def newItems():
for i in xrange(origLen + 1):
if i == start:
for val in valueList:
yield val
if i < origLen:
if i < start or i >= stop:
yield self._get_single_internal(i)
self._rebuild(newLen, newItems())
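# Hedged usage sketch, not part of the original module: a minimal concrete
# subclass backed by a plain tuple, implementing only the hooks the class
# docstring requires (__len__, _get_single_external, _set_list).
class _ExampleTupleList(ListMixin):
    def __init__(self, items=()):
        self._items = tuple(items)
        super(_ExampleTupleList, self).__init__()

    def __len__(self):
        return len(self._items)

    def _get_single_external(self, i):
        return self._items[i]

    def _set_list(self, length, items):
        # `items` may be a generator over _get_single_internal, so cache it
        # before replacing the underlying storage.
        self._items = tuple(items)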
| bsd-3-clause |
xfire/pydzen | pydzen.py | 1 | 11601 | #!/usr/bin/env python
#
# Copyright (C) 2008 Rico Schiekel (fire at downgra dot de)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# vim:syntax=python:sw=4:ts=4:expandtab
import sys
import os
import re
import types
import time
import subprocess
import logging
from optparse import OptionParser
class utils(object):
@staticmethod
def screens(screens = 0):
"""
        try to get the number of xinerama screens and return a list of screen numbers.
        first, check if the parameter value is > 0; if so, use it.
        second, check the XINERAMA_SCREENS environment variable, which should contain the
number of screens.
if the environment variable is not set, try xrandr to get the number of
connected displays.
if xrandr fails, return one screen. (-> [0])
"""
logger = logging.getLogger('utils')
if screens <= 0:
logger.debug('try to read environment variable "XINERAMA_SCREENS"')
screens = os.environ.get('XINERAMA_SCREENS')
if isinstance(screens, types.StringTypes):
try:
screens = int(screens)
except ValueError:
logger.error('XINERAMA_SCREENS invalid (%s)' % screens)
screens = 0
if not screens:
try:
logger.debug('try to use xrandr to determine number of connected screens')
screens = utils.execute('xrandr')
screens = len(re.findall(" connected ", screens, re.M))
except OSError:
logger.warning('can not execute xrandr')
screens = 1
logger.debug('found %d screens' % screens)
return range(0, screens)
@staticmethod
def parse_app(args, regex_list, value = None):
"""
parse the output on stdout from an application with one or several
regular expressions.
        return a dictionary with all matches.
"""
logging.getLogger('utils').debug('parse_app(%s, %s, %s)' % (args, regex_list, value))
        return utils.parse(utils.execute(args, value).split('\n'), regex_list)
@staticmethod
def parse_file(path_list, regex_list):
"""
parse one or several files with one or several regular expressions.
        return a dictionary with all matches.
"""
logger = logging.getLogger('utils')
logger.debug('parse_file(%s, %s)' % (path_list, regex_list))
if not isinstance(path_list, (types.ListType, types.TupleType)):
path_list = [path_list]
lines = []
for path in path_list:
try:
file = open(path, 'r')
lines.extend(file.readlines())
file.close()
except IOError, e:
logger.exception(e)
return utils.parse(lines, regex_list)
@staticmethod
def parse(lines, regex_list):
"""
parse a list of lines with one or several regular expressions.
matching groups must be named with (?P<name>...).
        all matches are returned as a dictionary, where the key is the group
        name and the value is the list of (possibly multiple) matches.
"""
if not isinstance(regex_list, (types.ListType, types.TupleType)):
regex_list = [regex_list]
ret = {}
for line in lines:
for regex in regex_list:
match = regex.match(line)
if match:
for k, v in match.groupdict().iteritems():
ret.setdefault(k, []).append(v)
return ret
@staticmethod
def pipe(app, **kwargs):
"""
        execute an application and return a communication object (returned by
subprocess.Popen(...)).
all parameters in **kwargs will be used as command line parameters for the
application. e.g.
execute('foo', v = True, w = 60, i = '/proc/bar')
-> foo -v -w 60 -i /proc/bar
"""
logger = logging.getLogger('utils')
def _to_param(k, v):
if isinstance(v, types.BooleanType):
return ['-%s' % k]
return ['-%s' % k, '%s' % str(v)]
args = [app]
for k,v in kwargs.iteritems():
if not isinstance(v, types.NoneType):
args.extend(_to_param(k,v))
try:
logger.debug('utils.pipe(%s)' % str(args))
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, close_fds=True)
except OSError, e:
logger.error('can not execute "%s": %s' % (app, e))
sys.exit(1)
return p
@staticmethod
def execute(app, value = None, **kwargs):
"""
        execute an application 'app'. if 'value' is not None, it is sent
        via stdin to the application.
all parameters in **kwargs will be used as command line parameters for the
application. e.g.
execute('foo', v = True, w = 60, i = '/proc/bar')
-> foo -v -w 60 -i /proc/bar
all output on stdout generated by the application is returned.
if an error occurs, pydzen will be terminated.
"""
logger = logging.getLogger('utils')
# if not value: value = ''
p = utils.pipe(app, **kwargs)
if value:
out, err = p.communicate(str(value))
else:
out, err = p.communicate()
if err:
logger.error('execute: error: %s' % err)
sys.exit(1)
return out
@staticmethod
def dzen(**kwargs):
"""
        return a communication object (returned by subprocess.Popen(...))
        connected to a dzen instance.
all parameters from **kwargs overwrite the default parameters in
config.DZEN_OPTIONS.
"""
args = config.DZEN_OPTIONS.copy()
args.update(kwargs)
return utils.pipe(config.DZEN, **args)
@staticmethod
def gdbar(value, **kwargs):
"""
execute gdbar and return the generated string.
all parameters from **kwargs overwrite the default parameters in
config.GDBAR_OPTIONS.
"""
args = config.GDBAR_OPTIONS.copy()
args.update(kwargs)
return utils.execute(config.GDBAR, value, **args)
@staticmethod
def cache(timeout):
"""
        decorator to cache the return value of a function for several
        seconds.
"""
def wrapper(f, cache={}):
def nfunc():
key = f
if key not in cache:
cache[key] = [f(), time.time()]
elif (time.time() - cache[key][1]) >= timeout:
cache[key] = [f(), time.time()]
return cache[key][0]
return nfunc
return wrapper
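# Hedged usage sketch, not part of the original module: throttling a callback
# that would otherwise run on every 1-second iteration of the main loop.
# /proc/loadavg is a Linux-specific assumption.
@utils.cache(30)
def _example_loadavg():
    return open('/proc/loadavg').read().split()[0]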
def load_plugins():
"""
try to load plugins from 'config.PLUGIN_DIR'.
each plugin must define an 'update' method, which returns either a string, an array
of strings or None.
"""
logger = logging.getLogger('pydzen')
sys.path.insert(0, os.path.expanduser(config.PLUGIN_DIR))
plugins = []
for p in config.PLUGINS:
try:
plugin = __import__(p, {}, {}, '*')
if hasattr(plugin, 'update'):
logger.debug('load plugin: "%s"' % p)
plugins.append(plugin)
else:
logger.warning('invalid plugin "%s": no update() function specified' % p)
except ImportError, e:
logger.error('error loading plugin "%s": %s' % (p, e))
sys.path = sys.path[1:]
return plugins
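# Hedged plugin sketch, not part of the original module: the smallest valid
# plugin is a module in config.PLUGIN_DIR exposing update(). Returning a list
# spreads values over several dzen lines; returning None hides the plugin.
#
#   # ~/.pydzen/clock.py (illustrative)
#   import time
#
#   def update():
#       return time.strftime('%H:%M:%S')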
def init_logger():
logging.basicConfig(level = config.LOGLEVEL,
format = '%(asctime)s %(name)-8s %(levelname)-6s %(message)s')
def read_config_file(file, **defaults):
"""
try to read the configuration file "file".
this is a normal python file, which defines several variables. these variables are
    then accessible through the ConfigWrapper object as normal member variables.
**defaults are default configuration variables, which might be overwritten.
"""
config = defaults.copy()
try:
execfile(os.path.expanduser(file), {}, config)
except StandardError, e:
print 'Invalid configuration file: %s' % e
sys.exit(1)
class _ConfigWrapper(dict):
def __init__(self, *args, **kwargs):
super(_ConfigWrapper, self).__init__(*args, **kwargs)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self[name] = value
return self[name]
return _ConfigWrapper(config)
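# Hedged config sketch, not part of the original module: ~/.pydzen/pydzenrc is
# plain Python; only the variable names below are taken from this module's
# usage, the values are illustrative assumptions.
#
#   PLUGINS = ['clock']
#   JOINTS = ' | '
#   DZEN = 'dzen2'
#   DZEN_OPTIONS = {'h': 16, 'ta': 'l'}
#   GDBAR = 'gdbar'
#   GDBAR_OPTIONS = {'w': 60, 'h': 10}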
def configure():
"""
parse command line parameters, then read config file and return
    a configuration object.
"""
parser = OptionParser()
parser.add_option('-c', '--config', dest = 'CONFIG_FILE',
help = 'specify an alternate pydzenrc file')
parser.add_option('-p', '--plugins', dest = 'PLUGIN_DIR',
help = 'specify an alternate plugin directory')
parser.add_option('-s', '--screens', dest = 'SCREENS', type = 'int',
                      help = 'number of Xinerama screens')
parser.set_defaults(CONFIG_FILE = '~/.pydzen/pydzenrc',
PLUGIN_DIR = '~/.pydzen',
SCREENS = 0)
(options, args) = parser.parse_args()
config = read_config_file(options.CONFIG_FILE,
PLUGINS = [],
LOGLEVEL = logging.WARN,
SCREENS = options.SCREENS,
PLUGIN_DIR = options.PLUGIN_DIR)
return config
config = configure()
if __name__ == '__main__':
init_logger()
logger = logging.getLogger('pydzen')
plugins = load_plugins()
dzens = [utils.dzen(xs = i + 1) for i in utils.screens(config.SCREENS)]
try:
while True:
lines = []
for p in plugins:
values = p.update()
if values:
if not isinstance(values, (types.ListType, types.TupleType)):
values = [values]
for i, value in enumerate(values):
if len(lines) < (i + 1):
lines.append([])
if value:
lines[i].append(value)
lines = [config.JOINTS.join(line) for line in lines if line]
lines = '\n'.join(lines) + '\n'
for d in dzens:
d.stdin.write(lines)
del lines
time.sleep(1)
except IOError, e:
try:
logger.error(d.stderr.read())
except StandardError, se:
logger.error(se)
except StandardError, e:
logger.error(e)
except KeyboardInterrupt:
pass
| gpl-2.0 |
ahoyosid/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
a13m/ansible | lib/ansible/runner/lookup_plugins/items.py | 166 | 1405 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.utils import safe_eval
import ansible.utils as utils
import ansible.errors as errors
def flatten(terms):
ret = []
for term in terms:
if isinstance(term, list):
ret.extend(term)
else:
ret.append(term)
return ret
class LookupModule(object):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
if not isinstance(terms, list) and not isinstance(terms,set):
raise errors.AnsibleError("with_items expects a list or a set")
return flatten(terms)
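# Hedged usage sketch, not part of the original plugin: in a playbook,
# with_items flattens one level of nesting before iterating.
#
#   - command: echo {{ item }}
#     with_items:
#       - [1, 2]
#       - 3
#   # iterates over 1, 2 and 3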
| gpl-3.0 |
vishnugonela/boto | boto/pyami/config.py | 95 | 8016 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os
import re
import warnings
import boto
from boto.compat import expanduser, ConfigParser, StringIO
# By default we use two locations for the boto configurations,
# /etc/boto.cfg and ~/.boto (which works on Windows and Unix).
BotoConfigPath = '/etc/boto.cfg'
BotoConfigLocations = [BotoConfigPath]
UserConfigPath = os.path.join(expanduser('~'), '.boto')
BotoConfigLocations.append(UserConfigPath)
# If there's a BOTO_CONFIG variable set, we load ONLY
# that variable
if 'BOTO_CONFIG' in os.environ:
BotoConfigLocations = [expanduser(os.environ['BOTO_CONFIG'])]
# If there's a BOTO_PATH variable set, we use anything there
# as the current configuration locations, split with os.pathsep.
elif 'BOTO_PATH' in os.environ:
BotoConfigLocations = []
for path in os.environ['BOTO_PATH'].split(os.pathsep):
BotoConfigLocations.append(expanduser(path))
class Config(ConfigParser):
def __init__(self, path=None, fp=None, do_load=True):
# We don't use ``super`` here, because ``ConfigParser`` still uses
# old-style classes.
ConfigParser.__init__(self, {'working_dir': '/mnt/pyami',
'debug': '0'})
if do_load:
if path:
self.load_from_path(path)
elif fp:
self.readfp(fp)
else:
self.read(BotoConfigLocations)
if "AWS_CREDENTIAL_FILE" in os.environ:
full_path = expanduser(os.environ['AWS_CREDENTIAL_FILE'])
try:
self.load_credential_file(full_path)
except IOError:
warnings.warn('Unable to load AWS_CREDENTIAL_FILE (%s)' % full_path)
def load_credential_file(self, path):
"""Load a credential file as is setup like the Java utilities"""
c_data = StringIO()
c_data.write("[Credentials]\n")
for line in open(path, "r").readlines():
c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
c_data.seek(0)
self.readfp(c_data)
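        # Hedged format sketch, not part of the original module: the Java
        # tools' credential file parsed above looks roughly like
        #
        #   AWSAccessKeyId=<access key id>
        #   AWSSecretKey=<secret key>
        #
        # and is rewritten into a [Credentials] section exposing
        # aws_access_key_id and aws_secret_access_key.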
def load_from_path(self, path):
file = open(path)
for line in file.readlines():
match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
if match:
extended_file = match.group(1)
(dir, file) = os.path.split(path)
self.load_from_path(os.path.join(dir, extended_file))
self.read(path)
def save_option(self, path, section, option, value):
"""
Write the specified Section.Option to the config file specified by path.
Replace any previous value. If the path doesn't exist, create it.
        Also add the option to the in-memory config.
"""
config = ConfigParser()
config.read(path)
if not config.has_section(section):
config.add_section(section)
config.set(section, option, value)
fp = open(path, 'w')
config.write(fp)
fp.close()
if not self.has_section(section):
self.add_section(section)
self.set(section, option, value)
def save_user_option(self, section, option, value):
self.save_option(UserConfigPath, section, option, value)
def save_system_option(self, section, option, value):
self.save_option(BotoConfigPath, section, option, value)
def get_instance(self, name, default=None):
try:
val = self.get('Instance', name)
except:
val = default
return val
def get_user(self, name, default=None):
try:
val = self.get('User', name)
except:
val = default
return val
def getint_user(self, name, default=0):
try:
val = self.getint('User', name)
except:
val = default
return val
def get_value(self, section, name, default=None):
return self.get(section, name, default)
def get(self, section, name, default=None):
try:
val = ConfigParser.get(self, section, name)
except:
val = default
return val
def getint(self, section, name, default=0):
try:
val = ConfigParser.getint(self, section, name)
except:
val = int(default)
return val
def getfloat(self, section, name, default=0.0):
try:
val = ConfigParser.getfloat(self, section, name)
except:
val = float(default)
return val
def getbool(self, section, name, default=False):
if self.has_option(section, name):
val = self.get(section, name)
if val.lower() == 'true':
val = True
else:
val = False
else:
val = default
return val
def setbool(self, section, name, value):
if value:
self.set(section, name, 'true')
else:
self.set(section, name, 'false')
def dump(self):
s = StringIO()
self.write(s)
print(s.getvalue())
def dump_safe(self, fp=None):
if not fp:
fp = StringIO()
for section in self.sections():
fp.write('[%s]\n' % section)
for option in self.options(section):
if option == 'aws_secret_access_key':
fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option)
else:
fp.write('%s = %s\n' % (option, self.get(section, option)))
def dump_to_sdb(self, domain_name, item_name):
from boto.compat import json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
if not domain:
domain = sdb.create_domain(domain_name)
item = domain.new_item(item_name)
item.active = False
for section in self.sections():
d = {}
for option in self.options(section):
d[option] = self.get(section, option)
item[section] = json.dumps(d)
item.save()
def load_from_sdb(self, domain_name, item_name):
from boto.compat import json
sdb = boto.connect_sdb()
domain = sdb.lookup(domain_name)
item = domain.get_item(item_name)
for section in item.keys():
if not self.has_section(section):
self.add_section(section)
d = json.loads(item[section])
for attr_name in d.keys():
attr_value = d[attr_name]
if attr_value is None:
attr_value = 'None'
if isinstance(attr_value, bool):
self.setbool(section, attr_name, attr_value)
else:
self.set(section, attr_name, attr_value)
| mit |
hamzehd/edx-platform | common/test/acceptance/fixtures/xqueue.py | 206 | 1402 | """
Fixture to configure XQueue response.
"""
import requests
import json
from . import XQUEUE_STUB_URL
class XQueueResponseFixtureError(Exception):
"""
Error occurred while configuring the stub XQueue.
"""
pass
class XQueueResponseFixture(object):
"""
Configure the XQueue stub's response to submissions.
"""
def __init__(self, pattern, response_dict):
"""
Configure XQueue stub to POST `response_dict` (a dictionary)
back to the LMS when it receives a submission that contains the string
`pattern`.
Remember that there is one XQueue stub shared by all the tests;
if possible, you should have tests use unique queue names
to avoid conflict between tests running in parallel.
"""
self._pattern = pattern
self._response_dict = response_dict
def install(self):
"""
Configure the stub via HTTP.
"""
url = XQUEUE_STUB_URL + "/set_config"
# Configure the stub to respond to submissions to our queue
payload = {self._pattern: json.dumps(self._response_dict)}
response = requests.put(url, data=payload)
if not response.ok:
raise XQueueResponseFixtureError(
"Could not configure XQueue stub for queue '{1}'. Status code: {2}".format(
self._pattern, self._response_dict))
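# Hedged usage sketch, not part of the original fixture: the response payload
# keys below are illustrative, not a documented XQueue schema.
#
#   fixture = XQueueResponseFixture(
#       'unique-queue-name', {'correct': True, 'score': 1})
#   fixture.install()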
| agpl-3.0 |
eRestin/MezzGIS | mezzanine/forms/tests.py | 6 | 1157 | from __future__ import unicode_literals
from django.test import TestCase
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.forms import fields
from mezzanine.forms.models import Form
class TestsForm(TestCase):
def test_forms(self):
"""
Simple 200 status check against rendering and posting to forms
with both optional and required fields.
"""
for required in (True, False):
form = Form.objects.create(title="Form",
status=CONTENT_STATUS_PUBLISHED)
for (i, (field, _)) in enumerate(fields.NAMES):
form.fields.create(label="Field %s" % i, field_type=field,
required=required, visible=True)
response = self.client.get(form.get_absolute_url())
self.assertEqual(response.status_code, 200)
visible_fields = form.fields.visible()
data = dict([("field_%s" % f.id, "test") for f in visible_fields])
response = self.client.post(form.get_absolute_url(), data=data)
self.assertEqual(response.status_code, 200)
| bsd-2-clause |
hashems/Mobile-Cloud-Development-Projects | appengine/flexible/endpoints/main.py | 10 | 2861 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Endpoints sample application.
Demonstrates how to create a simple echo API as well as how to deal with
various authentication methods.
"""
import base64
import json
import logging
from flask import Flask, jsonify, request
from flask_cors import cross_origin
from six.moves import http_client
app = Flask(__name__)
def _base64_decode(encoded_str):
# Add paddings manually if necessary.
num_missed_paddings = 4 - len(encoded_str) % 4
if num_missed_paddings != 4:
        encoded_str += '=' * num_missed_paddings  # str, not bytes: header values are text
return base64.b64decode(encoded_str).decode('utf-8')
@app.route('/echo', methods=['POST'])
def echo():
"""Simple echo service."""
message = request.get_json().get('message', '')
return jsonify({'message': message})
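# Hedged usage sketch, not part of the original sample: exercising the echo
# route against the local dev server started at the bottom of this file.
#
#   $ curl -X POST -H 'Content-Type: application/json' \
#       -d '{"message": "hello"}' http://127.0.0.1:8080/echo
#   {"message": "hello"}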
def auth_info():
"""Retrieves the authenication information from Google Cloud Endpoints."""
encoded_info = request.headers.get('X-Endpoint-API-UserInfo', None)
if encoded_info:
info_json = _base64_decode(encoded_info)
user_info = json.loads(info_json)
else:
user_info = {'id': 'anonymous'}
return jsonify(user_info)
@app.route('/auth/info/googlejwt', methods=['GET'])
def auth_info_google_jwt():
"""Auth info with Google signed JWT."""
return auth_info()
@app.route('/auth/info/googleidtoken', methods=['GET'])
def auth_info_google_id_token():
"""Auth info with Google ID token."""
return auth_info()
@app.route('/auth/info/firebase', methods=['GET'])
@cross_origin(send_wildcard=True)
def auth_info_firebase():
"""Auth info with Firebase auth."""
return auth_info()
@app.errorhandler(http_client.INTERNAL_SERVER_ERROR)
def unexpected_error(e):
"""Handle exceptions by returning swagger-compliant json."""
    logging.exception('An error occurred while processing the request.')
response = jsonify({
'code': http_client.INTERNAL_SERVER_ERROR,
'message': 'Exception: {}'.format(e)})
response.status_code = http_client.INTERNAL_SERVER_ERROR
return response
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
| apache-2.0 |
joerocklin/gem5 | src/cpu/o3/O3CPU.py | 4 | 6894 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Kevin Lim
from m5.defines import buildEnv
from m5.params import *
from m5.proxy import *
from BaseCPU import BaseCPU
from FUPool import *
from O3Checker import O3Checker
from BranchPredictor import BranchPredictor
class DerivO3CPU(BaseCPU):
type = 'DerivO3CPU'
cxx_header = 'cpu/o3/deriv.hh'
@classmethod
def memory_mode(cls):
return 'timing'
@classmethod
def require_caches(cls):
return True
@classmethod
def support_take_over(cls):
return True
activity = Param.Unsigned(0, "Initial count")
cachePorts = Param.Unsigned(200, "Cache Ports")
decodeToFetchDelay = Param.Cycles(1, "Decode to fetch delay")
    renameToFetchDelay = Param.Cycles(1, "Rename to fetch delay")
iewToFetchDelay = Param.Cycles(1, "Issue/Execute/Writeback to fetch "
"delay")
commitToFetchDelay = Param.Cycles(1, "Commit to fetch delay")
fetchWidth = Param.Unsigned(8, "Fetch width")
renameToDecodeDelay = Param.Cycles(1, "Rename to decode delay")
iewToDecodeDelay = Param.Cycles(1, "Issue/Execute/Writeback to decode "
"delay")
commitToDecodeDelay = Param.Cycles(1, "Commit to decode delay")
fetchToDecodeDelay = Param.Cycles(1, "Fetch to decode delay")
decodeWidth = Param.Unsigned(8, "Decode width")
iewToRenameDelay = Param.Cycles(1, "Issue/Execute/Writeback to rename "
"delay")
commitToRenameDelay = Param.Cycles(1, "Commit to rename delay")
decodeToRenameDelay = Param.Cycles(1, "Decode to rename delay")
renameWidth = Param.Unsigned(8, "Rename width")
commitToIEWDelay = Param.Cycles(1, "Commit to "
"Issue/Execute/Writeback delay")
renameToIEWDelay = Param.Cycles(2, "Rename to "
"Issue/Execute/Writeback delay")
issueToExecuteDelay = Param.Cycles(1, "Issue to execute delay (internal "
"to the IEW stage)")
dispatchWidth = Param.Unsigned(8, "Dispatch width")
issueWidth = Param.Unsigned(8, "Issue width")
wbWidth = Param.Unsigned(8, "Writeback width")
wbDepth = Param.Unsigned(1, "Writeback depth")
fuPool = Param.FUPool(DefaultFUPool(), "Functional Unit pool")
iewToCommitDelay = Param.Cycles(1, "Issue/Execute/Writeback to commit "
"delay")
renameToROBDelay = Param.Cycles(1, "Rename to reorder buffer delay")
commitWidth = Param.Unsigned(8, "Commit width")
squashWidth = Param.Unsigned(8, "Squash width")
trapLatency = Param.Cycles(13, "Trap latency")
fetchTrapLatency = Param.Cycles(1, "Fetch trap latency")
backComSize = Param.Unsigned(5, "Time buffer size for backwards communication")
forwardComSize = Param.Unsigned(5, "Time buffer size for forward communication")
LQEntries = Param.Unsigned(32, "Number of load queue entries")
SQEntries = Param.Unsigned(32, "Number of store queue entries")
LSQDepCheckShift = Param.Unsigned(4, "Number of places to shift addr before check")
LSQCheckLoads = Param.Bool(True,
"Should dependency violations be checked for loads & stores or just stores")
store_set_clear_period = Param.Unsigned(250000,
"Number of load/store insts before the dep predictor should be invalidated")
LFSTSize = Param.Unsigned(1024, "Last fetched store table size")
SSITSize = Param.Unsigned(1024, "Store set ID table size")
    numRobs = Param.Unsigned(1, "Number of Reorder Buffers")
numPhysIntRegs = Param.Unsigned(256, "Number of physical integer registers")
numPhysFloatRegs = Param.Unsigned(256, "Number of physical floating point "
"registers")
numIQEntries = Param.Unsigned(64, "Number of instruction queue entries")
numROBEntries = Param.Unsigned(192, "Number of reorder buffer entries")
smtNumFetchingThreads = Param.Unsigned(1, "SMT Number of Fetching Threads")
smtFetchPolicy = Param.String('SingleThread', "SMT Fetch policy")
smtLSQPolicy = Param.String('Partitioned', "SMT LSQ Sharing Policy")
smtLSQThreshold = Param.Int(100, "SMT LSQ Threshold Sharing Parameter")
smtIQPolicy = Param.String('Partitioned', "SMT IQ Sharing Policy")
smtIQThreshold = Param.Int(100, "SMT IQ Threshold Sharing Parameter")
smtROBPolicy = Param.String('Partitioned', "SMT ROB Sharing Policy")
smtROBThreshold = Param.Int(100, "SMT ROB Threshold Sharing Parameter")
smtCommitPolicy = Param.String('RoundRobin', "SMT Commit Policy")
branchPred = BranchPredictor(numThreads = Parent.numThreads)
needsTSO = Param.Bool(buildEnv['TARGET_ISA'] == 'x86',
"Enable TSO Memory model")
def addCheckerCpu(self):
if buildEnv['TARGET_ISA'] in ['arm']:
from ArmTLB import ArmTLB
self.checker = O3Checker(workload=self.workload,
exitOnError=False,
updateOnError=True,
warnOnlyOnLoadError=True)
self.checker.itb = ArmTLB(size = self.itb.size)
self.checker.dtb = ArmTLB(size = self.dtb.size)
self.checker.cpu_id = self.cpu_id
else:
print "ERROR: Checker only supported under ARM ISA!"
exit(1)
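# Usage sketch (hedged): in a gem5 configuration script the model is
# instantiated and tuned roughly as below; system and cache wiring is elided
# and the parameter values are illustrative, not tuned recommendations.
#
#     cpu = DerivO3CPU(cpu_id=0)
#     cpu.fetchWidth = 4
#     cpu.numROBEntries = 128
#     cpu.createThreads()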
| bsd-3-clause |
Distrotech/mozjs | js/src/python/mock-1.0.0/tests/testmock.py | 108 | 42396 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from tests.support import (
callable, unittest2, inPy3k, is_instance, next
)
import copy
import pickle
import sys
import mock
from mock import (
call, DEFAULT, patch, sentinel,
MagicMock, Mock, NonCallableMock,
NonCallableMagicMock, _CallList,
create_autospec
)
try:
unicode
except NameError:
unicode = str
class Iter(object):
def __init__(self):
self.thing = iter(['this', 'is', 'an', 'iter'])
def __iter__(self):
return self
def next(self):
return next(self.thing)
__next__ = next
class Subclass(MagicMock):
pass
class Thing(object):
attribute = 6
foo = 'bar'
class MockTest(unittest2.TestCase):
def test_all(self):
# if __all__ is badly defined then import * will raise an error
# We have to exec it because you can't import * inside a method
# in Python 3
exec("from mock import *")
def test_constructor(self):
mock = Mock()
self.assertFalse(mock.called, "called not initialised correctly")
self.assertEqual(mock.call_count, 0,
"call_count not initialised correctly")
self.assertTrue(is_instance(mock.return_value, Mock),
"return_value not initialised correctly")
self.assertEqual(mock.call_args, None,
"call_args not initialised correctly")
self.assertEqual(mock.call_args_list, [],
"call_args_list not initialised correctly")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly")
# Can't use hasattr for this test as it always returns True on a mock
self.assertFalse('_items' in mock.__dict__,
"default mock should not have '_items' attribute")
self.assertIsNone(mock._mock_parent,
"parent not initialised correctly")
self.assertIsNone(mock._mock_methods,
"methods not initialised correctly")
self.assertEqual(mock._mock_children, {},
"children not initialised incorrectly")
def test_unicode_not_broken(self):
# This used to raise an exception with Python 2.5 and Mock 0.4
unicode(Mock())
def test_return_value_in_constructor(self):
mock = Mock(return_value=None)
self.assertIsNone(mock.return_value,
"return value in constructor not honoured")
def test_repr(self):
mock = Mock(name='foo')
self.assertIn('foo', repr(mock))
self.assertIn("'%s'" % id(mock), repr(mock))
mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')]
for mock, name in mocks:
self.assertIn('%s.bar' % name, repr(mock.bar))
self.assertIn('%s.foo()' % name, repr(mock.foo()))
self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing))
self.assertIn('%s()' % name, repr(mock()))
self.assertIn('%s()()' % name, repr(mock()()))
self.assertIn('%s()().foo.bar.baz().bing' % name,
repr(mock()().foo.bar.baz().bing))
def test_repr_with_spec(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec=X())
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec_set=X)
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec_set=X())
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec=X, name='foo')
self.assertIn(" spec='X' ", repr(mock))
self.assertIn(" name='foo' ", repr(mock))
mock = Mock(name='foo')
self.assertNotIn("spec", repr(mock))
mock = Mock()
self.assertNotIn("spec", repr(mock))
mock = Mock(spec=['foo'])
self.assertNotIn("spec", repr(mock))
def test_side_effect(self):
mock = Mock()
def effect(*args, **kwargs):
raise SystemError('kablooie')
mock.side_effect = effect
self.assertRaises(SystemError, mock, 1, 2, fish=3)
mock.assert_called_with(1, 2, fish=3)
results = [1, 2, 3]
def effect():
return results.pop()
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"side effect not used correctly")
mock = Mock(side_effect=sentinel.SideEffect)
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side effect in constructor not used")
def side_effect():
return DEFAULT
mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
self.assertEqual(mock(), sentinel.RETURN)
@unittest2.skipUnless('java' in sys.platform,
'This test only applies to Jython')
def test_java_exception_side_effect(self):
import java
mock = Mock(side_effect=java.lang.RuntimeException("Boom!"))
# can't use assertRaises with java exceptions
try:
mock(1, 2, fish=3)
except java.lang.RuntimeException:
pass
else:
self.fail('java exception not raised')
        mock.assert_called_with(1, 2, fish=3)
def test_reset_mock(self):
parent = Mock()
spec = ["something"]
mock = Mock(name="child", parent=parent, spec=spec)
mock(sentinel.Something, something=sentinel.SomethingElse)
something = mock.something
mock.something()
mock.side_effect = sentinel.SideEffect
return_value = mock.return_value
return_value()
mock.reset_mock()
self.assertEqual(mock._mock_name, "child",
"name incorrectly reset")
self.assertEqual(mock._mock_parent, parent,
"parent incorrectly reset")
self.assertEqual(mock._mock_methods, spec,
"methods incorrectly reset")
self.assertFalse(mock.called, "called not reset")
self.assertEqual(mock.call_count, 0, "call_count not reset")
self.assertEqual(mock.call_args, None, "call_args not reset")
self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly: %r != %r" %
(mock.method_calls, []))
self.assertEqual(mock.mock_calls, [])
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side_effect incorrectly reset")
self.assertEqual(mock.return_value, return_value,
"return_value incorrectly reset")
self.assertFalse(return_value.called, "return value mock not reset")
self.assertEqual(mock._mock_children, {'something': something},
"children reset incorrectly")
self.assertEqual(mock.something, something,
"children incorrectly cleared")
self.assertFalse(mock.something.called, "child not reset")
def test_reset_mock_recursion(self):
mock = Mock()
mock.return_value = mock
# used to cause recursion
mock.reset_mock()
def test_call(self):
mock = Mock()
self.assertTrue(is_instance(mock.return_value, Mock),
"Default return_value should be a Mock")
result = mock()
self.assertEqual(mock(), result,
"different result from consecutive calls")
mock.reset_mock()
ret_val = mock(sentinel.Arg)
self.assertTrue(mock.called, "called not set")
        self.assertEqual(mock.call_count, 1, "call_count incorrect")
self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
"call_args not set")
self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
"call_args_list not initialised correctly")
mock.return_value = sentinel.ReturnValue
ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
self.assertEqual(ret_val, sentinel.ReturnValue,
"incorrect return value")
self.assertEqual(mock.call_count, 2, "call_count incorrect")
self.assertEqual(mock.call_args,
((sentinel.Arg,), {'key': sentinel.KeyArg}),
"call_args not set")
self.assertEqual(mock.call_args_list, [
((sentinel.Arg,), {}),
((sentinel.Arg,), {'key': sentinel.KeyArg})
],
"call_args_list not set")
def test_call_args_comparison(self):
mock = Mock()
mock()
mock(sentinel.Arg)
mock(kw=sentinel.Kwarg)
mock(sentinel.Arg, kw=sentinel.Kwarg)
self.assertEqual(mock.call_args_list, [
(),
((sentinel.Arg,),),
({"kw": sentinel.Kwarg},),
((sentinel.Arg,), {"kw": sentinel.Kwarg})
])
self.assertEqual(mock.call_args,
((sentinel.Arg,), {"kw": sentinel.Kwarg}))
def test_assert_called_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_with()
self.assertRaises(AssertionError, mock.assert_called_with, 1)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_with)
mock(1, 2, 3, a='fish', b='nothing')
mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
def test_assert_called_once_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_once_with()
mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock('foo', 'bar', baz=2)
mock.assert_called_once_with('foo', 'bar', baz=2)
mock.reset_mock()
mock('foo', 'bar', baz=2)
self.assertRaises(
AssertionError,
lambda: mock.assert_called_once_with('bob', 'bar', baz=2)
)
def test_attribute_access_returns_mocks(self):
mock = Mock()
something = mock.something
self.assertTrue(is_instance(something, Mock), "attribute isn't a mock")
self.assertEqual(mock.something, something,
"different attributes returned for same name")
# Usage example
mock = Mock()
mock.something.return_value = 3
self.assertEqual(mock.something(), 3, "method returned wrong value")
self.assertTrue(mock.something.called,
"method didn't record being called")
def test_attributes_have_name_and_parent_set(self):
mock = Mock()
something = mock.something
self.assertEqual(something._mock_name, "something",
"attribute name not set correctly")
self.assertEqual(something._mock_parent, mock,
"attribute parent not set correctly")
def test_method_calls_recorded(self):
mock = Mock()
mock.something(3, fish=None)
mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
self.assertEqual(mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
def test_method_calls_compare_easily(self):
mock = Mock()
mock.something()
self.assertEqual(mock.method_calls, [('something',)])
self.assertEqual(mock.method_calls, [('something', (), {})])
mock = Mock()
mock.something('different')
self.assertEqual(mock.method_calls, [('something', ('different',))])
self.assertEqual(mock.method_calls,
[('something', ('different',), {})])
mock = Mock()
mock.something(x=1)
self.assertEqual(mock.method_calls, [('something', {'x': 1})])
self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
mock = Mock()
mock.something('different', some='more')
self.assertEqual(mock.method_calls, [
('something', ('different',), {'some': 'more'})
])
def test_only_allowed_methods_exist(self):
for spec in ['something'], ('something',):
for arg in 'spec', 'spec_set':
mock = Mock(**{arg: spec})
# this should be allowed
mock.something
self.assertRaisesRegexp(
AttributeError,
"Mock object has no attribute 'something_else'",
getattr, mock, 'something_else'
)
def test_from_spec(self):
class Something(object):
x = 3
__something__ = None
def y(self):
pass
def test_attributes(mock):
# should work
mock.x
mock.y
mock.__something__
self.assertRaisesRegexp(
AttributeError,
"Mock object has no attribute 'z'",
getattr, mock, 'z'
)
self.assertRaisesRegexp(
AttributeError,
"Mock object has no attribute '__foobar__'",
getattr, mock, '__foobar__'
)
test_attributes(Mock(spec=Something))
test_attributes(Mock(spec=Something()))
def test_wraps_calls(self):
real = Mock()
mock = Mock(wraps=real)
self.assertEqual(mock(), real())
real.reset_mock()
mock(1, 2, fish=3)
real.assert_called_with(1, 2, fish=3)
def test_wraps_call_with_nondefault_return_value(self):
real = Mock()
mock = Mock(wraps=real)
mock.return_value = 3
self.assertEqual(mock(), 3)
self.assertFalse(real.called)
def test_wraps_attributes(self):
class Real(object):
attribute = Mock()
real = Real()
mock = Mock(wraps=real)
self.assertEqual(mock.attribute(), real.attribute())
self.assertRaises(AttributeError, lambda: mock.fish)
self.assertNotEqual(mock.attribute, real.attribute)
result = mock.attribute.frog(1, 2, fish=3)
Real.attribute.frog.assert_called_with(1, 2, fish=3)
self.assertEqual(result, Real.attribute.frog())
def test_exceptional_side_effect(self):
mock = Mock(side_effect=AttributeError)
self.assertRaises(AttributeError, mock)
mock = Mock(side_effect=AttributeError('foo'))
self.assertRaises(AttributeError, mock)
def test_baseexceptional_side_effect(self):
mock = Mock(side_effect=KeyboardInterrupt)
self.assertRaises(KeyboardInterrupt, mock)
mock = Mock(side_effect=KeyboardInterrupt('foo'))
self.assertRaises(KeyboardInterrupt, mock)
def test_assert_called_with_message(self):
mock = Mock()
self.assertRaisesRegexp(AssertionError, 'Not called',
mock.assert_called_with)
def test__name__(self):
mock = Mock()
self.assertRaises(AttributeError, lambda: mock.__name__)
mock.__name__ = 'foo'
self.assertEqual(mock.__name__, 'foo')
def test_spec_list_subclass(self):
class Sub(list):
pass
mock = Mock(spec=Sub(['foo']))
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock, 'foo')
def test_spec_class(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertTrue(isinstance(mock, X))
mock = Mock(spec=X())
self.assertTrue(isinstance(mock, X))
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertTrue(isinstance(mock, X))
mock = Mock(spec_set=X())
self.assertTrue(isinstance(mock, X))
def test_setting_attribute_with_spec_set(self):
class X(object):
y = 3
mock = Mock(spec=X)
mock.x = 'foo'
mock = Mock(spec_set=X)
def set_attr():
mock.x = 'foo'
mock.y = 'foo'
self.assertRaises(AttributeError, set_attr)
def test_copy(self):
current = sys.getrecursionlimit()
self.addCleanup(sys.setrecursionlimit, current)
# can't use sys.maxint as this doesn't exist in Python 3
sys.setrecursionlimit(int(10e8))
# this segfaults without the fix in place
copy.copy(Mock())
@unittest2.skipIf(inPy3k, "no old style classes in Python 3")
def test_spec_old_style_classes(self):
class Foo:
bar = 7
mock = Mock(spec=Foo)
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
mock = Mock(spec=Foo())
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
@unittest2.skipIf(inPy3k, "no old style classes in Python 3")
def test_spec_set_old_style_classes(self):
class Foo:
bar = 7
mock = Mock(spec_set=Foo)
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
def _set():
mock.foo = 3
self.assertRaises(AttributeError, _set)
mock = Mock(spec_set=Foo())
mock.bar = 6
self.assertRaises(AttributeError, lambda: mock.foo)
def _set():
mock.foo = 3
self.assertRaises(AttributeError, _set)
def test_subclass_with_properties(self):
class SubClass(Mock):
def _get(self):
return 3
def _set(self, value):
raise NameError('strange error')
some_attribute = property(_get, _set)
s = SubClass(spec_set=SubClass)
self.assertEqual(s.some_attribute, 3)
def test():
s.some_attribute = 3
self.assertRaises(NameError, test)
def test():
s.foo = 'bar'
self.assertRaises(AttributeError, test)
def test_setting_call(self):
mock = Mock()
def __call__(self, a):
return self._mock_call(a)
type(mock).__call__ = __call__
mock('one')
mock.assert_called_with('one')
self.assertRaises(TypeError, mock, 'one', 'two')
@unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
"__dir__ not available until Python 2.6 or later")
def test_dir(self):
mock = Mock()
attrs = set(dir(mock))
type_attrs = set([m for m in dir(Mock) if not m.startswith('_')])
# all public attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
# creates these attributes
mock.a, mock.b
self.assertIn('a', dir(mock))
self.assertIn('b', dir(mock))
# instance attributes
mock.c = mock.d = None
self.assertIn('c', dir(mock))
self.assertIn('d', dir(mock))
# magic methods
mock.__iter__ = lambda s: iter([])
self.assertIn('__iter__', dir(mock))
@unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
"__dir__ not available until Python 2.6 or later")
def test_dir_from_spec(self):
mock = Mock(spec=unittest2.TestCase)
testcase_attrs = set(dir(unittest2.TestCase))
attrs = set(dir(mock))
# all attributes from the spec are included
self.assertEqual(set(), testcase_attrs - attrs)
# shadow a sys attribute
mock.version = 3
self.assertEqual(dir(mock).count('version'), 1)
@unittest2.skipUnless(sys.version_info[:2] >= (2, 6),
"__dir__ not available until Python 2.6 or later")
def test_filter_dir(self):
patcher = patch.object(mock, 'FILTER_DIR', False)
patcher.start()
try:
attrs = set(dir(Mock()))
type_attrs = set(dir(Mock))
# ALL attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
finally:
patcher.stop()
def test_configure_mock(self):
mock = Mock(foo='bar')
self.assertEqual(mock.foo, 'bar')
mock = MagicMock(foo='bar')
self.assertEqual(mock.foo, 'bar')
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
mock = Mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
mock = Mock()
mock.configure_mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs):
# needed because assertRaisesRegex doesn't work easily with newlines
try:
func(*args, **kwargs)
except:
instance = sys.exc_info()[1]
self.assertIsInstance(instance, exception)
else:
self.fail('Exception %r not raised' % (exception,))
msg = str(instance)
self.assertEqual(msg, message)
def test_assert_called_with_failure_message(self):
mock = NonCallableMock()
expected = "mock(1, '2', 3, bar='foo')"
message = 'Expected call: %s\nNot called'
self.assertRaisesWithMsg(
AssertionError, message % (expected,),
mock.assert_called_with, 1, '2', 3, bar='foo'
)
mock.foo(1, '2', 3, foo='foo')
asserters = [
mock.foo.assert_called_with, mock.foo.assert_called_once_with
]
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, '2', 3, bar='foo')"
message = 'Expected call: %s\nActual call: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, '2', 3, bar='foo'
)
# just kwargs
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(bar='foo')"
message = 'Expected call: %s\nActual call: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, bar='foo'
)
# just args
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, 2, 3)"
message = 'Expected call: %s\nActual call: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, 2, 3
)
# empty
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo()"
message = 'Expected call: %s\nActual call: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual), meth
)
def test_mock_calls(self):
mock = MagicMock()
# need to do this because MagicMock.mock_calls used to just return
# a MagicMock which also returned a MagicMock when __eq__ was called
self.assertIs(mock.mock_calls == [], True)
mock = MagicMock()
mock()
expected = [('', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock.foo()
expected.append(call.foo())
self.assertEqual(mock.mock_calls, expected)
# intermediate mock_calls work too
self.assertEqual(mock.foo.mock_calls, [('', (), {})])
mock = MagicMock()
mock().foo(1, 2, 3, a=4, b=5)
expected = [
('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5))
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.return_value.foo.mock_calls,
[('', (1, 2, 3), dict(a=4, b=5))])
self.assertEqual(mock.return_value.mock_calls,
[('foo', (1, 2, 3), dict(a=4, b=5))])
mock = MagicMock()
mock().foo.bar().baz()
expected = [
('', (), {}), ('().foo.bar', (), {}),
('().foo.bar().baz', (), {})
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().call_list())
for kwargs in dict(), dict(name='bar'):
mock = MagicMock(**kwargs)
int(mock.foo)
expected = [('foo.__int__', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock = MagicMock(**kwargs)
mock.a()()
expected = [('a', (), {}), ('a()', (), {})]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.a().mock_calls, [call()])
mock = MagicMock(**kwargs)
mock(1)(2)(3)
self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list())
self.assertEqual(mock().mock_calls, call(2)(3).call_list())
self.assertEqual(mock()().mock_calls, call(3).call_list())
mock = MagicMock(**kwargs)
mock(1)(2)(3).a.b.c(4)
self.assertEqual(mock.mock_calls,
call(1)(2)(3).a.b.c(4).call_list())
self.assertEqual(mock().mock_calls,
call(2)(3).a.b.c(4).call_list())
self.assertEqual(mock()().mock_calls,
call(3).a.b.c(4).call_list())
mock = MagicMock(**kwargs)
int(mock().foo.bar().baz())
last_call = ('().foo.bar().baz().__int__', (), {})
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().__int__().call_list())
self.assertEqual(mock().foo.bar().mock_calls,
call.baz().__int__().call_list())
self.assertEqual(mock().foo.bar().baz.mock_calls,
call().__int__().call_list())
def test_subclassing(self):
class Subclass(Mock):
pass
mock = Subclass()
self.assertIsInstance(mock.foo, Subclass)
self.assertIsInstance(mock(), Subclass)
class Subclass(Mock):
def _get_child_mock(self, **kwargs):
return Mock(**kwargs)
mock = Subclass()
self.assertNotIsInstance(mock.foo, Subclass)
self.assertNotIsInstance(mock(), Subclass)
def test_arg_lists(self):
mocks = [
Mock(),
MagicMock(),
NonCallableMock(),
NonCallableMagicMock()
]
def assert_attrs(mock):
names = 'call_args_list', 'method_calls', 'mock_calls'
for name in names:
attr = getattr(mock, name)
self.assertIsInstance(attr, _CallList)
self.assertIsInstance(attr, list)
self.assertEqual(attr, [])
for mock in mocks:
assert_attrs(mock)
if callable(mock):
mock()
mock(1, 2)
mock(a=3)
mock.reset_mock()
assert_attrs(mock)
mock.foo()
mock.foo.bar(1, a=3)
mock.foo(1).bar().baz(3)
mock.reset_mock()
assert_attrs(mock)
def test_call_args_two_tuple(self):
mock = Mock()
mock(1, a=3)
mock(2, b=4)
self.assertEqual(len(mock.call_args), 2)
args, kwargs = mock.call_args
self.assertEqual(args, (2,))
self.assertEqual(kwargs, dict(b=4))
expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))]
for expected, call_args in zip(expected_list, mock.call_args_list):
self.assertEqual(len(call_args), 2)
self.assertEqual(expected[0], call_args[0])
self.assertEqual(expected[1], call_args[1])
def test_side_effect_iterator(self):
mock = Mock(side_effect=iter([1, 2, 3]))
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
mock = MagicMock(side_effect=['a', 'b', 'c'])
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
mock = Mock(side_effect='ghi')
self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i'])
self.assertRaises(StopIteration, mock)
class Foo(object):
pass
mock = MagicMock(side_effect=Foo)
self.assertIsInstance(mock(), Foo)
mock = Mock(side_effect=Iter())
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
def test_side_effect_setting_iterator(self):
mock = Mock()
mock.side_effect = iter([1, 2, 3])
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
mock.side_effect = ['a', 'b', 'c']
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
this_iter = Iter()
mock.side_effect = this_iter
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
self.assertIs(mock.side_effect, this_iter)
def test_side_effect_iterator_exceptions(self):
for Klass in Mock, MagicMock:
iterable = (ValueError, 3, KeyError, 6)
m = Klass(side_effect=iterable)
self.assertRaises(ValueError, m)
self.assertEqual(m(), 3)
self.assertRaises(KeyError, m)
self.assertEqual(m(), 6)
def test_assert_has_calls_any_order(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(b=6)
kalls = [
call(1, 2), ({'a': 3},),
((3, 4),), ((), {'a': 3}),
('', (1, 2)), ('', {'a': 3}),
('', (1, 2), {}), ('', (), {'a': 3})
]
for kall in kalls:
mock.assert_has_calls([kall], any_order=True)
for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo':
self.assertRaises(
AssertionError, mock.assert_has_calls,
[kall], any_order=True
)
kall_lists = [
[call(1, 2), call(b=6)],
[call(3, 4), call(1, 2)],
[call(b=6), call(b=6)],
]
for kall_list in kall_lists:
mock.assert_has_calls(kall_list, any_order=True)
kall_lists = [
[call(b=6), call(b=6), call(b=6)],
[call(1, 2), call(1, 2)],
[call(3, 4), call(1, 2), call(5, 7)],
[call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)],
]
for kall_list in kall_lists:
self.assertRaises(
AssertionError, mock.assert_has_calls,
kall_list, any_order=True
)
def test_assert_has_calls(self):
kalls1 = [
call(1, 2), ({'a': 3},),
((3, 4),), call(b=6),
('', (1,), {'b': 6}),
]
kalls2 = [call.foo(), call.bar(1)]
kalls2.extend(call.spam().baz(a=3).call_list())
kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list())
mocks = []
for mock in Mock(), MagicMock():
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(1, b=6)
mocks.append((mock, kalls1))
mock = Mock()
mock.foo()
mock.bar(1)
mock.spam().baz(a=3)
mock.bam(set(), foo={}).fish([1])
mocks.append((mock, kalls2))
for mock, kalls in mocks:
for i in range(len(kalls)):
for step in 1, 2, 3:
these = kalls[i:i+step]
mock.assert_has_calls(these)
if len(these) > 1:
self.assertRaises(
AssertionError,
mock.assert_has_calls,
list(reversed(these))
)
def test_assert_any_call(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(1, b=6)
mock.assert_any_call(1, 2)
mock.assert_any_call(a=3)
mock.assert_any_call(1, b=6)
self.assertRaises(
AssertionError,
mock.assert_any_call
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
1, 3
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
a=4
)
def test_mock_calls_create_autospec(self):
def f(a, b):
pass
obj = Iter()
obj.f = f
funcs = [
create_autospec(f),
create_autospec(obj).f
]
for func in funcs:
func(1, 2)
func(3, 4)
self.assertEqual(
func.mock_calls, [call(1, 2), call(3, 4)]
)
def test_mock_add_spec(self):
class _One(object):
one = 1
class _Two(object):
two = 2
class Anything(object):
one = two = three = 'four'
klasses = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
]
for Klass in list(klasses):
klasses.append(lambda K=Klass: K(spec=Anything))
klasses.append(lambda K=Klass: K(spec_set=Anything))
for Klass in klasses:
for kwargs in dict(), dict(spec_set=True):
mock = Klass()
                # no error
mock.one, mock.two, mock.three
for One, Two in [(_One, _Two), (['one'], ['two'])]:
for kwargs in dict(), dict(spec_set=True):
mock.mock_add_spec(One, **kwargs)
mock.one
self.assertRaises(
AttributeError, getattr, mock, 'two'
)
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
mock.mock_add_spec(Two, **kwargs)
self.assertRaises(
AttributeError, getattr, mock, 'one'
)
mock.two
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
# note that creating a mock, setting an instance attribute, and
# *then* setting a spec doesn't work. Not the intended use case
def test_mock_add_spec_magic_methods(self):
for Klass in MagicMock, NonCallableMagicMock:
mock = Klass()
int(mock)
mock.mock_add_spec(object)
self.assertRaises(TypeError, int, mock)
mock = Klass()
mock['foo']
            mock.__int__.return_value = 4
mock.mock_add_spec(int)
self.assertEqual(int(mock), 4)
self.assertRaises(TypeError, lambda: mock['foo'])
def test_adding_child_mock(self):
for Klass in NonCallableMock, Mock, MagicMock, NonCallableMagicMock:
mock = Klass()
mock.foo = Mock()
mock.foo()
self.assertEqual(mock.method_calls, [call.foo()])
self.assertEqual(mock.mock_calls, [call.foo()])
mock = Klass()
mock.bar = Mock(name='name')
mock.bar()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
# mock with an existing _new_parent but no name
mock = Klass()
mock.baz = MagicMock()()
mock.baz()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
def test_adding_return_value_mock(self):
for Klass in Mock, MagicMock:
mock = Klass()
mock.return_value = MagicMock()
mock()()
self.assertEqual(mock.mock_calls, [call(), call()()])
def test_manager_mock(self):
class Foo(object):
one = 'one'
two = 'two'
manager = Mock()
p1 = patch.object(Foo, 'one')
p2 = patch.object(Foo, 'two')
mock_one = p1.start()
self.addCleanup(p1.stop)
mock_two = p2.start()
self.addCleanup(p2.stop)
manager.attach_mock(mock_one, 'one')
manager.attach_mock(mock_two, 'two')
Foo.two()
Foo.one()
self.assertEqual(manager.mock_calls, [call.two(), call.one()])
def test_magic_methods_mock_calls(self):
for Klass in Mock, MagicMock:
m = Klass()
m.__int__ = Mock(return_value=3)
m.__float__ = MagicMock(return_value=3.0)
int(m)
float(m)
self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()])
self.assertEqual(m.method_calls, [])
    # NOTE: this definition is shadowed by the later test_attribute_deletion
    # below, so it never runs under this test class.
    def test_attribute_deletion(self):
# this behaviour isn't *useful*, but at least it's now tested...
for Klass in Mock, MagicMock, NonCallableMagicMock, NonCallableMock:
m = Klass()
original = m.foo
m.foo = 3
del m.foo
self.assertEqual(m.foo, original)
new = m.foo = Mock()
del m.foo
self.assertEqual(m.foo, new)
def test_mock_parents(self):
for Klass in Mock, MagicMock:
m = Klass()
original_repr = repr(m)
m.return_value = m
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m.reset_mock()
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m = Klass()
m.b = m.a
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m.reset_mock()
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m = Klass()
original_repr = repr(m)
m.a = m()
m.a.return_value = m
self.assertEqual(repr(m), original_repr)
self.assertEqual(repr(m.a()), original_repr)
def test_attach_mock(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in classes:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'bar')
self.assertIs(m.bar, m2)
self.assertIn("name='mock.bar'", repr(m2))
m.bar.baz(1)
self.assertEqual(m.mock_calls, [call.bar.baz(1)])
self.assertEqual(m.method_calls, [call.bar.baz(1)])
def test_attach_mock_return_value(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in Mock, MagicMock:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'return_value')
self.assertIs(m(), m2)
self.assertIn("name='mock()'", repr(m2))
m2.foo()
self.assertEqual(m.mock_calls, call().foo().call_list())
def test_attribute_deletion(self):
for mock in Mock(), MagicMock():
self.assertTrue(hasattr(mock, 'm'))
del mock.m
self.assertFalse(hasattr(mock, 'm'))
del mock.f
self.assertFalse(hasattr(mock, 'f'))
self.assertRaises(AttributeError, getattr, mock, 'f')
def test_class_assignable(self):
for mock in Mock(), MagicMock():
self.assertNotIsInstance(mock, int)
mock.__class__ = int
self.assertIsInstance(mock, int)
@unittest2.expectedFailure
def test_pickle(self):
for Klass in (MagicMock, Mock, Subclass, NonCallableMagicMock):
mock = Klass(name='foo', attribute=3)
mock.foo(1, 2, 3)
data = pickle.dumps(mock)
new = pickle.loads(data)
new.foo.assert_called_once_with(1, 2, 3)
self.assertFalse(new.called)
self.assertTrue(is_instance(new, Klass))
self.assertIsInstance(new, Thing)
self.assertIn('name="foo"', repr(new))
self.assertEqual(new.attribute, 3)
if __name__ == '__main__':
unittest2.main()
| mpl-2.0 |
A-deLuna/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname in 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
            # or if it is immediately followed by an optgroup
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
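if __name__ == '__main__':
    # Hedged usage sketch: html5lib applies this filter for you when
    # serializing with omit_optional_tags=True; driving it by hand looks like
    # this. The 'etree' tree walker name is an assumption about the local
    # html5lib build.
    import html5lib
    from html5lib import treewalkers
    doc = html5lib.parse('<table><tbody><tr><td>x</td></tr></tbody></table>')
    walker = treewalkers.getTreeWalker('etree')
    for token in Filter(walker(doc)):
        print(token)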
| mpl-2.0 |
aronsky/home-assistant | homeassistant/components/switch/vultr.py | 7 | 3608 | """
Support for interacting with Vultr subscriptions.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.vultr/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_NAME
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.components.vultr import (
CONF_SUBSCRIPTION, ATTR_AUTO_BACKUPS, ATTR_ALLOWED_BANDWIDTH,
ATTR_CREATED_AT, ATTR_SUBSCRIPTION_ID, ATTR_SUBSCRIPTION_NAME,
ATTR_IPV4_ADDRESS, ATTR_IPV6_ADDRESS, ATTR_MEMORY, ATTR_DISK,
ATTR_COST_PER_MONTH, ATTR_OS, ATTR_REGION, ATTR_VCPUS, DATA_VULTR)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Vultr {}'
DEPENDENCIES = ['vultr']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SUBSCRIPTION): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Vultr subscription switch."""
vultr = hass.data[DATA_VULTR]
subscription = config.get(CONF_SUBSCRIPTION)
name = config.get(CONF_NAME)
if subscription not in vultr.data:
_LOGGER.error("Subscription %s not found", subscription)
return False
add_entities([VultrSwitch(vultr, subscription, name)], True)
class VultrSwitch(SwitchDevice):
"""Representation of a Vultr subscription switch."""
def __init__(self, vultr, subscription, name):
"""Initialize a new Vultr switch."""
self._vultr = vultr
self._name = name
self.subscription = subscription
self.data = None
@property
def name(self):
"""Return the name of the switch."""
try:
return self._name.format(self.data['label'])
except (TypeError, KeyError):
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self.data['power_status'] == 'running'
@property
def icon(self):
"""Return the icon of this server."""
return 'mdi:server' if self.is_on else 'mdi:server-off'
@property
def device_state_attributes(self):
"""Return the state attributes of the Vultr subscription."""
return {
ATTR_ALLOWED_BANDWIDTH: self.data.get('allowed_bandwidth_gb'),
ATTR_AUTO_BACKUPS: self.data.get('auto_backups'),
ATTR_COST_PER_MONTH: self.data.get('cost_per_month'),
ATTR_CREATED_AT: self.data.get('date_created'),
ATTR_DISK: self.data.get('disk'),
ATTR_IPV4_ADDRESS: self.data.get('main_ip'),
ATTR_IPV6_ADDRESS: self.data.get('v6_main_ip'),
ATTR_MEMORY: self.data.get('ram'),
ATTR_OS: self.data.get('os'),
ATTR_REGION: self.data.get('location'),
ATTR_SUBSCRIPTION_ID: self.data.get('SUBID'),
ATTR_SUBSCRIPTION_NAME: self.data.get('label'),
ATTR_VCPUS: self.data.get('vcpu_count'),
}
def turn_on(self, **kwargs):
"""Boot-up the subscription."""
if self.data['power_status'] != 'running':
self._vultr.start(self.subscription)
def turn_off(self, **kwargs):
"""Halt the subscription."""
if self.data['power_status'] == 'running':
self._vultr.halt(self.subscription)
def update(self):
"""Get the latest data from the device and update the data."""
self._vultr.update()
self.data = self._vultr.data[self.subscription]
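# Example configuration.yaml entry (hedged sketch; the subscription ID is a
# placeholder, not a real Vultr SUBID):
#
#     switch:
#       - platform: vultr
#         subscription: '576965'
#         name: 'Vultr {}'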
| apache-2.0 |
mattcaldwell/boto | boto/ec2/autoscale/request.py | 68 | 1549 | # Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Request(object):
def __init__(self, connection=None):
self.connection = connection
self.request_id = ''
def __repr__(self):
return 'Request:%s' % self.request_id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'RequestId':
self.request_id = value
else:
setattr(self, name, value)
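# Usage sketch (hedged): boto's XML response parser invokes startElement and
# endElement on a Request instance while walking the response, so the id can
# be read back afterwards. The value below is illustrative only.
#
#     req = Request()
#     req.endElement('RequestId', 'abc-123', None)
#     req.request_id  # -> 'abc-123'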
| mit |
playm2mboy/edx-platform | lms/djangoapps/bulk_email/migrations/0010_auto__chg_field_optout_course_id__add_field_courseemail_template_name_.py | 120 | 8430 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Optout.course_id'
db.alter_column('bulk_email_optout', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))
# Adding field 'CourseEmail.template_name'
db.add_column('bulk_email_courseemail', 'template_name',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
keep_default=False)
# Adding field 'CourseEmail.from_addr'
db.add_column('bulk_email_courseemail', 'from_addr',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True),
keep_default=False)
# Changing field 'CourseEmail.course_id'
db.alter_column('bulk_email_courseemail', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))
# Adding field 'CourseEmailTemplate.name'
db.add_column('bulk_email_courseemailtemplate', 'name',
self.gf('django.db.models.fields.CharField')(max_length=255, unique=True, null=True),
keep_default=False)
# Changing field 'CourseAuthorization.course_id'
db.alter_column('bulk_email_courseauthorization', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(unique=True, max_length=255))
def backwards(self, orm):
# Changing field 'Optout.course_id'
db.alter_column('bulk_email_optout', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))
# Deleting field 'CourseEmail.template_name'
db.delete_column('bulk_email_courseemail', 'template_name')
# Deleting field 'CourseEmail.from_addr'
db.delete_column('bulk_email_courseemail', 'from_addr')
# Changing field 'CourseEmail.course_id'
db.alter_column('bulk_email_courseemail', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))
# Deleting field 'CourseEmailTemplate.name'
db.delete_column('bulk_email_courseemailtemplate', 'name')
# Changing field 'CourseAuthorization.course_id'
db.alter_column('bulk_email_courseauthorization', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255, unique=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bulk_email.courseauthorization': {
'Meta': {'object_name': 'CourseAuthorization'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'email_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'bulk_email.courseemail': {
'Meta': {'object_name': 'CourseEmail'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_addr': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
},
'bulk_email.courseemailtemplate': {
'Meta': {'object_name': 'CourseEmailTemplate'},
'html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'plain_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'bulk_email.optout': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bulk_email']
| agpl-3.0 |
HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/VTK/ThirdParty/Twisted/twisted/python/compat.py | 22 | 11514 | # -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
"""
from __future__ import division
import sys, string, socket, struct
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        bad = [x for x in addr if x not in string.hexdigits + ':.']
        if bad:
            raise ValueError("Illegal characters: %r" % (''.join(bad),))
parts = addr.split(':')
elided = parts.count('')
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
try:
socket.AF_INET6
except AttributeError:
socket.AF_INET6 = 'AF_INET6'
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
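# Illustrative round trip (not part of the original module): once the
# monkey-patch above is in place, IPv6 conversions work even on platforms
# whose socket module lacks them:
#
#   packed = socket.inet_pton(socket.AF_INET6, '2001:db8::1')
#   socket.inet_ntop(socket.AF_INET6, packed)   # -> '2001:db8::1'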
adict = dict
if _PY3:
# These are actually useless in Python 2 as well, but we need to go
# through deprecation process there (ticket #5895):
del adict, inet_pton, inet_ntop
set = set
frozenset = frozenset
try:
from functools import reduce
except ImportError:
reduce = reduce
def execfile(filename, globals, locals=None):
"""
Execute a Python script in the given namespaces.
Similar to the execfile builtin, but a namespace is mandatory, partly
because that's a sensible thing to require, and because otherwise we'd
have to do some frame hacking.
This is a compatibility implementation for Python 3 porting, to avoid the
use of the deprecated builtin C{execfile} function.
"""
if locals is None:
locals = globals
fin = open(filename, "rbU")
try:
source = fin.read()
finally:
fin.close()
code = compile(source, filename, "exec")
exec(code, globals, locals)
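# Sketch of intended use (the namespace name and file are made up):
#
#   ns = {}
#   execfile('config.py', ns)
#   ns['SOME_SETTING']   # names defined by config.py end up in ns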
try:
cmp = cmp
except NameError:
def cmp(a, b):
"""
Compare two objects.
Returns a negative number if C{a < b}, zero if they are equal, and a
positive number if C{a > b}.
"""
if a < b:
return -1
elif a == b:
return 0
else:
return 1
def comparable(klass):
"""
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
"""
# On Python 2, __cmp__ will just work, so no need to add extra methods:
if not _PY3:
return klass
def __eq__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self.__cmp__(other)
if c is NotImplemented:
return c
return c >= 0
klass.__lt__ = __lt__
klass.__gt__ = __gt__
klass.__le__ = __le__
klass.__ge__ = __ge__
klass.__eq__ = __eq__
klass.__ne__ = __ne__
return klass
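# A minimal usage sketch (illustrative; the class is made up and relies on
# the cmp() fallback defined above):
#
#   @comparable
#   class Version(object):
#       def __init__(self, n):
#           self.n = n
#       def __cmp__(self, other):
#           return cmp(self.n, other.n)
#
#   Version(1) < Version(2)   # True on both Python 2 and Python 3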
if _PY3:
unicode = str
else:
unicode = unicode
def nativeString(s):
"""
Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
encoding if conversion is necessary.
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
"""
if not isinstance(s, (bytes, unicode)):
raise TypeError("%r is neither bytes nor unicode" % s)
if _PY3:
if isinstance(s, bytes):
return s.decode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.encode("ascii")
else:
if isinstance(s, unicode):
return s.encode("ascii")
else:
# Ensure we're limited to ASCII subset:
s.decode("ascii")
return s
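# Illustrative behavior (not part of the original module):
#
#   nativeString(b"conf")     # -> "conf" (str) on Python 3
#   nativeString(u"conf")     # -> "conf" (str) on Python 2
#   nativeString(u"caf\xe9")  # raises UnicodeError: not ASCII-encodable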
if _PY3:
def reraise(exception, traceback):
raise exception.with_traceback(traceback)
else:
exec("""def reraise(exception, traceback):
raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or C{None} indicating a new traceback.
"""
if _PY3:
from io import StringIO as NativeStringIO
else:
from io import BytesIO as NativeStringIO
# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
def iterbytes(originalBytes):
for i in range(len(originalBytes)):
yield originalBytes[i:i+1]
def intToBytes(i):
return ("%d" % i).encode("ascii")
# Ideally we would use memoryview, but it has a number of differences from
# the Python 2 buffer() that make that impractical
    # (http://bugs.python.org/issue15945, incompatibility with pyOpenSSL due to
# PyArg_ParseTuple differences.)
def lazyByteSlice(object, offset=0, size=None):
"""
Return a copy of the given bytes-like object.
If an offset is given, the copy starts at that offset. If a size is
given, the copy will only be of that length.
@param object: C{bytes} to be copied.
@param offset: C{int}, starting index of copy.
@param size: Optional, if an C{int} is given limit the length of copy
to this size.
"""
if size is None:
return object[offset:]
else:
return object[offset:(offset + size)]
def networkString(s):
if not isinstance(s, unicode):
raise TypeError("Can only convert text to bytes on Python 3")
return s.encode('ascii')
else:
def iterbytes(originalBytes):
return originalBytes
def intToBytes(i):
return b"%d" % i
lazyByteSlice = buffer
def networkString(s):
if not isinstance(s, str):
raise TypeError("Can only pass-through bytes on Python 2")
# Ensure we're limited to ASCII subset:
s.decode('ascii')
return s
iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped.
"""
intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as an ASCII-encoded Arabic numeral.
In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""
networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.
This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:
networkString("Hello %d" % (n,))
@param s: A native string to convert to bytes if necessary.
@type s: C{str}
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
@rtype: C{bytes}
"""
try:
StringType = basestring
except NameError:
# Python 3+
StringType = str
try:
from types import InstanceType
except ImportError:
# Python 3+
InstanceType = object
try:
from types import FileType
except ImportError:
from io import IOBase
# Python 3+
FileType = IOBase
__all__ = [
"reraise",
"execfile",
"frozenset",
"reduce",
"set",
"cmp",
"comparable",
"nativeString",
"NativeStringIO",
"networkString",
"unicode",
"iterbytes",
"intToBytes",
"lazyByteSlice",
"StringType",
"InstanceType",
"FileType",
]
| gpl-3.0 |
Architektor/PySnip | venv/lib/python2.7/site-packages/pip/_vendor/requests/adapters.py | 149 | 18680 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth,
select_proxy, to_native_string)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ClosedPoolError
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import NewConnectionError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema)
from .auth import _basic_auth_str
try:
from .packages.urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
def SOCKSProxyManager(*args, **kwargs):
raise InvalidSchema("Missing dependencies for SOCKS support.")
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
"""The Base Transport Adapter"""
def __init__(self):
super(BaseAdapter, self).__init__()
def send(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
if max_retries == DEFAULT_RETRIES:
self.max_retries = Retry(0, read=False)
else:
self.max_retries = Retry.from_int(max_retries)
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
if proxy in self.proxy_manager:
manager = self.proxy_manager[proxy]
elif proxy.lower().startswith('socks'):
username, password = get_auth_from_url(proxy)
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
proxy,
username=username,
password=password,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs
)
else:
proxy_headers = self.proxy_headers(proxy)
manager = self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return manager
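    # For reference (illustrative): a proxy URL such as
    # 'socks5://user:[email protected]:1080' is routed to SOCKSProxyManager
    # above, while 'http://proxy.example:3128' gets a ProxyManager whose
    # Proxy-Authorization header is derived from credentials in the URL.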
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
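    # Hedged example of the public API this method backs:
    #
    #   session.get('https://internal.example/', verify='/path/to/ca.pem',
    #               cert=('/path/client.crt', '/path/client.key'))
    #
    # `verify` may be True (bundled CAs), a CA-bundle path, or a directory;
    # `cert` may be a single PEM path or a (cert, key) pair, as handled above.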
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this closes the PoolManager and any active ProxyManager,
which closes any pooled connections.
"""
self.poolmanager.clear()
for proxy in self.proxy_manager.values():
proxy.clear()
def request_url(self, request, proxies):
"""Obtain the url to use when making the final request.
        If the message is being sent through an HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
"""
proxy = select_proxy(request.url, proxies)
scheme = urlparse(request.url).scheme
is_proxied_http_request = (proxy and scheme != 'https')
using_socks_proxy = False
if proxy:
proxy_scheme = urlparse(proxy).scheme.lower()
using_socks_proxy = proxy_scheme.startswith('socks')
url = request.path_url
if is_proxied_http_request and not using_socks_proxy:
url = urldefragauth(request.url)
return url
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The url of the proxy being used for this request.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=self.max_retries,
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
# Receive the response from the server
try:
# For Python 2.7+ versions, use buffering of HTTP
# responses
r = low_conn.getresponse(buffering=True)
except TypeError:
# For compatibility with Python 2.6 versions and back
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
# TODO: Remove this in 3.0.0: see #2811
if not isinstance(e.reason, NewConnectionError):
raise ConnectTimeout(e, request=request)
if isinstance(e.reason, ResponseError):
raise RetryError(e, request=request)
if isinstance(e.reason, _ProxyError):
raise ProxyError(e, request=request)
raise ConnectionError(e, request=request)
except ClosedPoolError as e:
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
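# Illustrative mounting of this adapter onto a session (standard Requests
# usage; the retry count is an arbitrary example value):
#
#   import requests
#   s = requests.Session()
#   s.mount('https://', HTTPAdapter(max_retries=5))
#   s.get('https://example.org/')   # failed connects retried up to 5 times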
| gpl-3.0 |
mtconley/turntable | test/lib/python2.7/site-packages/numpy/doc/constants.py | 172 | 8954 | """
=========
Constants
=========
Numpy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
from __future__ import division, absolute_import, print_function
import textwrap, re
# Maintain same format as in numpy.add_newdocs
constants = []
def add_newdoc(module, name, doc):
constants.append((name, doc))
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True], dtype=bool)
>>> np.isnan([np.NZERO])
array([False], dtype=bool)
>>> np.isinf([np.NZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True], dtype=bool)
>>> np.isnan([np.PZERO])
array([False], dtype=bool)
>>> np.isinf([np.PZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
.. [1] http://en.wikipedia.org/wiki/Napier_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
See Also
--------
`numpy.doc.indexing`
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
if __doc__:
constants_str = []
constants.sort()
for name, doc in constants:
s = textwrap.dedent(doc).replace("\n", "\n ")
# Replace sections by rubrics
lines = s.split("\n")
new_lines = []
for line in lines:
m = re.match(r'^(\s+)[-=]+\s*$', line)
if m and new_lines:
prev = textwrap.dedent(new_lines.pop())
new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
new_lines.append('')
else:
new_lines.append(line)
s = "\n".join(new_lines)
# Done.
constants_str.append(""".. const:: %s\n %s""" % (name, s))
constants_str = "\n".join(constants_str)
__doc__ = __doc__ % dict(constant_list=constants_str)
del constants_str, name, doc
del line, lines, new_lines, m, s, prev
del constants, add_newdoc
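# The loop above rewrites numpydoc-style section underlines into Sphinx
# rubrics, e.g. (illustrative):
#
#   Examples          becomes      .. rubric:: Examples
#   --------
#
# so the generated module __doc__ renders cleanly in the reference docs.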
| mit |
gcblue/gcblue | bin/Lib/test/test_dictcomps.py | 93 | 3849 | import unittest
from test import test_support as support
# For scope testing.
g = "Global variable"
class DictComprehensionTest(unittest.TestCase):
def test_basics(self):
expected = {0: 10, 1: 11, 2: 12, 3: 13, 4: 14, 5: 15, 6: 16, 7: 17,
8: 18, 9: 19}
actual = {k: k + 10 for k in range(10)}
self.assertEqual(actual, expected)
expected = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9}
actual = {k: v for k in range(10) for v in range(10) if k == v}
self.assertEqual(actual, expected)
def test_scope_isolation(self):
k = "Local Variable"
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {k: None for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(k, "Local Variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {k: v for v in range(10) for k in range(v * 9, v * 10)}
self.assertEqual(k, "Local Variable")
self.assertEqual(actual, expected)
def test_scope_isolation_from_global(self):
expected = {0: None, 1: None, 2: None, 3: None, 4: None, 5: None,
6: None, 7: None, 8: None, 9: None}
actual = {g: None for g in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(g, "Global variable")
expected = {9: 1, 18: 2, 19: 2, 27: 3, 28: 3, 29: 3, 36: 4, 37: 4,
38: 4, 39: 4, 45: 5, 46: 5, 47: 5, 48: 5, 49: 5, 54: 6,
55: 6, 56: 6, 57: 6, 58: 6, 59: 6, 63: 7, 64: 7, 65: 7,
66: 7, 67: 7, 68: 7, 69: 7, 72: 8, 73: 8, 74: 8, 75: 8,
76: 8, 77: 8, 78: 8, 79: 8, 81: 9, 82: 9, 83: 9, 84: 9,
85: 9, 86: 9, 87: 9, 88: 9, 89: 9}
actual = {g: v for v in range(10) for g in range(v * 9, v * 10)}
self.assertEqual(g, "Global variable")
self.assertEqual(actual, expected)
def test_global_visibility(self):
expected = {0: 'Global variable', 1: 'Global variable',
2: 'Global variable', 3: 'Global variable',
4: 'Global variable', 5: 'Global variable',
6: 'Global variable', 7: 'Global variable',
8: 'Global variable', 9: 'Global variable'}
actual = {k: g for k in range(10)}
self.assertEqual(actual, expected)
def test_local_visibility(self):
v = "Local variable"
expected = {0: 'Local variable', 1: 'Local variable',
2: 'Local variable', 3: 'Local variable',
4: 'Local variable', 5: 'Local variable',
6: 'Local variable', 7: 'Local variable',
8: 'Local variable', 9: 'Local variable'}
actual = {k: v for k in range(10)}
self.assertEqual(actual, expected)
self.assertEqual(v, "Local variable")
def test_illegal_assignment(self):
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} = 5", "<test>",
"exec")
with self.assertRaisesRegexp(SyntaxError, "can't assign"):
compile("{x: y for y, x in ((1, 2), (3, 4))} += 5", "<test>",
"exec")
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| bsd-3-clause |
sander76/home-assistant | homeassistant/components/sisyphus/media_player.py | 9 | 6557 | """Support for track controls on the Sisyphus Kinetic Art Table."""
import aiohttp
from sisyphus_control import Track
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SHUFFLE_SET,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.exceptions import PlatformNotReady
from . import DATA_SISYPHUS
MEDIA_TYPE_TRACK = "sisyphus_track"
SUPPORTED_FEATURES = (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_PAUSE
| SUPPORT_SHUFFLE_SET
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY
)
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a media player entity for a Sisyphus table."""
host = discovery_info[CONF_HOST]
try:
table_holder = hass.data[DATA_SISYPHUS][host]
table = await table_holder.get_table()
except aiohttp.ClientError as err:
raise PlatformNotReady() from err
add_entities([SisyphusPlayer(table_holder.name, host, table)], True)
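# For reference (an assumption based on the CONF_HOST lookup above):
# discovery_info is expected to carry the table's address, e.g.
# {'host': '192.168.1.50'}, supplied by the sisyphus component on setup.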
class SisyphusPlayer(MediaPlayerEntity):
"""Representation of a Sisyphus table as a media player device."""
def __init__(self, name, host, table):
"""Initialize the Sisyphus media device."""
self._name = name
self._host = host
self._table = table
async def async_added_to_hass(self):
"""Add listeners after this object has been initialized."""
self._table.add_listener(self.async_write_ha_state)
async def async_update(self):
"""Force update table state."""
await self._table.refresh()
@property
def unique_id(self):
"""Return the UUID of the table."""
return self._table.id
@property
def available(self):
"""Return true if the table is responding to heartbeats."""
return self._table.is_connected
@property
def name(self):
"""Return the name of the table."""
return self._name
@property
def state(self):
"""Return the current state of the table; sleeping maps to off."""
if self._table.state in ["homing", "playing"]:
return STATE_PLAYING
if self._table.state == "paused":
if self._table.is_sleeping:
return STATE_OFF
return STATE_PAUSED
if self._table.state == "waiting":
return STATE_IDLE
return None
@property
def volume_level(self):
"""Return the current playback speed (0..1)."""
return self._table.speed
@property
def shuffle(self):
"""Return True if the current playlist is in shuffle mode."""
return self._table.is_shuffle
async def async_set_shuffle(self, shuffle):
"""Change the shuffle mode of the current playlist."""
await self._table.set_shuffle(shuffle)
@property
def media_playlist(self):
"""Return the name of the current playlist."""
return self._table.active_playlist.name if self._table.active_playlist else None
@property
def media_title(self):
"""Return the title of the current track."""
return self._table.active_track.name if self._table.active_track else None
@property
def media_content_type(self):
"""Return the content type currently playing; i.e. a Sisyphus track."""
return MEDIA_TYPE_TRACK
@property
def media_content_id(self):
"""Return the track ID of the current track."""
return self._table.active_track.id if self._table.active_track else None
@property
def media_duration(self):
"""Return the total time it will take to run this track at the current speed."""
return self._table.active_track_total_time.total_seconds()
@property
def media_position(self):
"""Return the current position within the track."""
return (
self._table.active_track_total_time
- self._table.active_track_remaining_time
).total_seconds()
@property
def media_position_updated_at(self):
"""Return the last time we got a position update."""
return self._table.active_track_remaining_time_as_of
@property
def supported_features(self):
"""Return the features supported by this table."""
return SUPPORTED_FEATURES
@property
def media_image_url(self):
"""Return the URL for a thumbnail image of the current track."""
if self._table.active_track:
return self._table.active_track.get_thumbnail_url(Track.ThumbnailSize.LARGE)
        return super().media_image_url
async def async_turn_on(self):
"""Wake up a sleeping table."""
await self._table.wakeup()
async def async_turn_off(self):
"""Put the table to sleep."""
await self._table.sleep()
async def async_volume_down(self):
"""Slow down playback."""
await self._table.set_speed(max(0, self._table.speed - 0.1))
async def async_volume_up(self):
"""Speed up playback."""
await self._table.set_speed(min(1.0, self._table.speed + 0.1))
async def async_set_volume_level(self, volume):
"""Set playback speed (0..1)."""
await self._table.set_speed(volume)
async def async_media_play(self):
"""Start playing."""
await self._table.play()
async def async_media_pause(self):
"""Pause."""
await self._table.pause()
async def async_media_next_track(self):
"""Skip to next track."""
cur_track_index = self._get_current_track_index()
await self._table.active_playlist.play(
self._table.active_playlist.tracks[cur_track_index + 1]
)
async def async_media_previous_track(self):
"""Skip to previous track."""
cur_track_index = self._get_current_track_index()
await self._table.active_playlist.play(
self._table.active_playlist.tracks[cur_track_index - 1]
)
def _get_current_track_index(self):
for index, track in enumerate(self._table.active_playlist.tracks):
if track.id == self._table.active_track.id:
return index
return -1
| apache-2.0 |
kamyu104/django | tests/signals/tests.py | 311 | 10273 | from __future__ import unicode_literals
from django.db import models
from django.db.models import signals
from django.dispatch import receiver
from django.test import TestCase
from django.utils import six
from .models import Author, Book, Car, Person
class BaseSignalTest(TestCase):
def setUp(self):
# Save up the number of connected signals so that we can check at the
# end that all the signals we register get properly unregistered (#9989)
self.pre_signals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
def tearDown(self):
# Check that all our signals got disconnected properly.
post_signals = (
len(signals.pre_save.receivers),
len(signals.post_save.receivers),
len(signals.pre_delete.receivers),
len(signals.post_delete.receivers),
)
self.assertEqual(self.pre_signals, post_signals)
class SignalTests(BaseSignalTest):
def test_model_pre_init_and_post_init(self):
data = []
def pre_init_callback(sender, args, **kwargs):
data.append(kwargs['kwargs'])
signals.pre_init.connect(pre_init_callback)
def post_init_callback(sender, instance, **kwargs):
data.append(instance)
signals.post_init.connect(post_init_callback)
p1 = Person(first_name="John", last_name="Doe")
self.assertEqual(data, [{}, p1])
def test_save_signals(self):
data = []
def pre_save_handler(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("raw", False))
)
def post_save_handler(signal, sender, instance, **kwargs):
data.append(
(instance, kwargs.get("created"), kwargs.get("raw", False))
)
signals.pre_save.connect(pre_save_handler, weak=False)
signals.post_save.connect(post_save_handler, weak=False)
try:
p1 = Person.objects.create(first_name="John", last_name="Smith")
self.assertEqual(data, [
(p1, False),
(p1, True, False),
])
data[:] = []
p1.first_name = "Tom"
p1.save()
self.assertEqual(data, [
(p1, False),
(p1, False, False),
])
data[:] = []
# Calling an internal method purely so that we can trigger a "raw" save.
p1.save_base(raw=True)
self.assertEqual(data, [
(p1, True),
(p1, False, True),
])
data[:] = []
p2 = Person(first_name="James", last_name="Jones")
p2.id = 99999
p2.save()
self.assertEqual(data, [
(p2, False),
(p2, True, False),
])
data[:] = []
p2.id = 99998
p2.save()
self.assertEqual(data, [
(p2, False),
(p2, True, False),
])
finally:
signals.pre_save.disconnect(pre_save_handler)
signals.post_save.disconnect(post_save_handler)
def test_delete_signals(self):
data = []
def pre_delete_handler(signal, sender, instance, **kwargs):
data.append(
(instance, instance.id is None)
)
# #8285: signals can be any callable
class PostDeleteHandler(object):
def __init__(self, data):
self.data = data
def __call__(self, signal, sender, instance, **kwargs):
self.data.append(
(instance, instance.id is None)
)
post_delete_handler = PostDeleteHandler(data)
signals.pre_delete.connect(pre_delete_handler, weak=False)
signals.post_delete.connect(post_delete_handler, weak=False)
try:
p1 = Person.objects.create(first_name="John", last_name="Smith")
p1.delete()
self.assertEqual(data, [
(p1, False),
(p1, False),
])
data[:] = []
p2 = Person(first_name="James", last_name="Jones")
p2.id = 99999
p2.save()
p2.id = 99998
p2.save()
p2.delete()
self.assertEqual(data, [
(p2, False),
(p2, False)
])
data[:] = []
self.assertQuerysetEqual(
Person.objects.all(), [
"James Jones",
],
six.text_type
)
finally:
signals.pre_delete.disconnect(pre_delete_handler)
signals.post_delete.disconnect(post_delete_handler)
def test_decorators(self):
data = []
@receiver(signals.pre_save, weak=False)
def decorated_handler(signal, sender, instance, **kwargs):
data.append(instance)
@receiver(signals.pre_save, sender=Car, weak=False)
def decorated_handler_with_sender_arg(signal, sender, instance, **kwargs):
data.append(instance)
try:
c1 = Car.objects.create(make="Volkswagon", model="Passat")
self.assertEqual(data, [c1, c1])
finally:
signals.pre_save.disconnect(decorated_handler)
signals.pre_save.disconnect(decorated_handler_with_sender_arg, sender=Car)
def test_save_and_delete_signals_with_m2m(self):
data = []
def pre_save_handler(signal, sender, instance, **kwargs):
data.append('pre_save signal, %s' % instance)
if kwargs.get('raw'):
data.append('Is raw')
def post_save_handler(signal, sender, instance, **kwargs):
data.append('post_save signal, %s' % instance)
if 'created' in kwargs:
if kwargs['created']:
data.append('Is created')
else:
data.append('Is updated')
if kwargs.get('raw'):
data.append('Is raw')
def pre_delete_handler(signal, sender, instance, **kwargs):
data.append('pre_delete signal, %s' % instance)
data.append('instance.id is not None: %s' % (instance.id is not None))
def post_delete_handler(signal, sender, instance, **kwargs):
data.append('post_delete signal, %s' % instance)
data.append('instance.id is not None: %s' % (instance.id is not None))
signals.pre_save.connect(pre_save_handler, weak=False)
signals.post_save.connect(post_save_handler, weak=False)
signals.pre_delete.connect(pre_delete_handler, weak=False)
signals.post_delete.connect(post_delete_handler, weak=False)
try:
a1 = Author.objects.create(name='Neal Stephenson')
self.assertEqual(data, [
"pre_save signal, Neal Stephenson",
"post_save signal, Neal Stephenson",
"Is created"
])
data[:] = []
b1 = Book.objects.create(name='Snow Crash')
self.assertEqual(data, [
"pre_save signal, Snow Crash",
"post_save signal, Snow Crash",
"Is created"
])
data[:] = []
# Assigning and removing to/from m2m shouldn't generate an m2m signal.
b1.authors = [a1]
self.assertEqual(data, [])
b1.authors = []
self.assertEqual(data, [])
finally:
signals.pre_save.disconnect(pre_save_handler)
signals.post_save.disconnect(post_save_handler)
signals.pre_delete.disconnect(pre_delete_handler)
signals.post_delete.disconnect(post_delete_handler)
def test_disconnect_in_dispatch(self):
"""
Test that signals that disconnect when being called don't mess future
dispatching.
"""
class Handler(object):
def __init__(self, param):
self.param = param
self._run = False
def __call__(self, signal, sender, **kwargs):
self._run = True
signal.disconnect(receiver=self, sender=sender)
a, b = Handler(1), Handler(2)
signals.post_save.connect(a, sender=Person, weak=False)
signals.post_save.connect(b, sender=Person, weak=False)
Person.objects.create(first_name='John', last_name='Smith')
self.assertTrue(a._run)
self.assertTrue(b._run)
self.assertEqual(signals.post_save.receivers, [])
class LazyModelRefTest(BaseSignalTest):
def setUp(self):
super(LazyModelRefTest, self).setUp()
self.received = []
def receiver(self, **kwargs):
self.received.append(kwargs)
def test_invalid_sender_model_name(self):
with self.assertRaisesMessage(ValueError,
"Specified sender must either be a model or a "
"model name of the 'app_label.ModelName' form."):
signals.post_init.connect(self.receiver, sender='invalid')
def test_already_loaded_model(self):
signals.post_init.connect(
self.receiver, sender='signals.Book', weak=False
)
try:
instance = Book()
self.assertEqual(self.received, [{
'signal': signals.post_init,
'sender': Book,
'instance': instance
}])
finally:
signals.post_init.disconnect(self.receiver, sender=Book)
def test_not_loaded_model(self):
signals.post_init.connect(
self.receiver, sender='signals.Created', weak=False
)
try:
class Created(models.Model):
pass
instance = Created()
self.assertEqual(self.received, [{
'signal': signals.post_init, 'sender': Created, 'instance': instance
}])
finally:
signals.post_init.disconnect(self.receiver, sender=Created)
| bsd-3-clause |
droidapps/pdfreader4Android | deps/freetype-2.4.10/src/tools/docmaker/docmaker.py | 463 | 2766 | #!/usr/bin/env python
#
# DocMaker (c) 2002, 2004, 2008 David Turner <[email protected]>
#
# This program is a re-write of the original DocMaker tool used
# to generate the API Reference of the FreeType font engine
# by converting in-source comments into structured HTML.
#
# This new version is capable of outputting XML data, as well
# as accepting more liberal formatting options.
#
# It also uses regular expression matching and substitution
# to speed things significantly.
#
from sources import *
from content import *
from utils import *
from formatter import *
from tohtml import *
import utils
import sys, os, time, string, glob, getopt
def usage():
print "\nDocMaker Usage information\n"
print " docmaker [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -t : set project title, as in '-t \"My Project\"'"
print " -o : set output directory, as in '-o mydir'"
print " -p : set documentation prefix, as in '-p ft2'"
print ""
print " --title : same as -t, as in '--title=\"My Project\"'"
print " --output : same as -o, as in '--output=mydir'"
print " --prefix : same as -p, as in '--prefix=ft2'"
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"ht:o:p:", \
["help", "title=", "output=", "prefix="] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
project_title = "Project"
project_prefix = None
output_dir = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-t", "--title" ):
project_title = opt[1]
if opt[0] in ( "-o", "--output" ):
utils.output_dir = opt[1]
if opt[0] in ( "-p", "--prefix" ):
project_prefix = opt[1]
check_output()
# create context and processor
source_processor = SourceProcessor()
content_processor = ContentProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
# process sections
content_processor.finish()
formatter = HtmlFormatter( content_processor, project_title, project_prefix )
formatter.toc_dump()
formatter.index_dump()
formatter.section_dump_all()
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof
| gpl-3.0 |
jruiperezv/ANALYSE | lms/djangoapps/psychometrics/management/commands/init_psychometrics.py | 131 | 2392 | #!/usr/bin/python
#
# generate psychometrics data from tracking logs and student module data
import json
from courseware.models import StudentModule
from track.models import TrackingLog
from psychometrics.models import PsychometricData
from django.conf import settings
from django.core.management.base import BaseCommand
#db = "ocwtutor" # for debugging
#db = "default"
db = getattr(settings, 'DATABASE_FOR_PSYCHOMETRICS', 'default')
class Command(BaseCommand):
help = "initialize PsychometricData tables from StudentModule instances (and tracking data, if in SQL)."
help += "Note this is done for all courses for which StudentModule instances exist."
def handle(self, *args, **options):
# delete all pmd
#PsychometricData.objects.all().delete()
#PsychometricData.objects.using(db).all().delete()
smset = StudentModule.objects.using(db).exclude(max_grade=None)
for sm in smset:
usage_key = sm.module_state_key
if not usage_key.block_type == "problem":
continue
try:
state = json.loads(sm.state)
done = state['done']
except:
print "Oops, failed to eval state for %s (state=%s)" % (sm, sm.state)
continue
if done: # only keep if problem completed
try:
pmd = PsychometricData.objects.using(db).get(studentmodule=sm)
except PsychometricData.DoesNotExist:
pmd = PsychometricData(studentmodule=sm)
pmd.done = done
pmd.attempts = state['attempts']
# get attempt times from tracking log
uname = sm.student.username
tset = TrackingLog.objects.using(db).filter(username=uname, event_type__contains='problem_check')
tset = tset.filter(event_source='server')
tset = tset.filter(event__contains="'%s'" % usage_key)
checktimes = [x.dtcreated for x in tset]
pmd.checktimes = checktimes
if not len(checktimes) == pmd.attempts:
print "Oops, mismatch in number of attempts and check times for %s" % pmd
#print pmd
pmd.save(using=db)
print "%d PMD entries" % PsychometricData.objects.using(db).all().count()
| agpl-3.0 |
DESatAPSU/DAWDs | python/origBandpass_FITSToCSV.py | 1 | 1930 | # Converts STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.fits to
# y3a2_std_passband_extend3000_ugrizYatm.csv
#
# To run (bash):
# python origBandpass_FITSToCSV.py > origBandpass_FITSToCSV.log 2>&1 &
#
# To run (tcsh):
# python origBandpass_FITSToCSV.py >& origBandpass_FITSToCSV.log &
#
# DLT, 2017-06-30
# based in part on scripts by Jack Mueller and Jacob Robertson.
# Initial setup...
import numpy as np
import pandas as pd
import os
import string
import shutil
import pyfits
# Be sure to edit these next two line2 appropriately...
bandsDir = '/Users/dtucker/IRAF/DECam/StdBands_Y3A2_extend3000'
inputFile = bandsDir+'/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.fits'
# List of filter bands (plus atm)...
bandList = ['g', 'r', 'i', 'z', 'Y', 'atm']
# Read in inputFile to create a reformatted version in CSV format...
hdulist = pyfits.open(inputFile)
tbdata = hdulist[1].data
# Create lists from each column...
lambdaList = tbdata['LAMBDA'].tolist()
gList = tbdata['g'].tolist()
rList = tbdata['r'].tolist()
iList = tbdata['i'].tolist()
zList = tbdata['z'].tolist()
YList = tbdata['Y'].tolist()
atmList = tbdata['atm'].tolist()
# Create pandas dataframe from the lists...
df = pd.DataFrame(np.column_stack([lambdaList,gList,rList,iList,zList,YList,atmList]),
columns=['lambda','g','r','i','z','Y','atm'])
# Output the full table as a CSV file
outputFile = bandsDir+'/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.csv'
if os.path.isfile(outputFile):
shutil.move(outputFile, outputFile+'~')
df.to_csv(outputFile,index=False)
# Output individual bands (+atm)...
for band in bandList:
outputFile = bandsDir+'/'+'STD_BANDPASSES_Y3A1_FGCM_20170630_extend3000.'+band+'.csv'
if os.path.isfile(outputFile):
shutil.move(outputFile, outputFile+'~')
columnNames = ['lambda',band]
df.to_csv(outputFile,index=False,columns=columnNames,header=False)
# Finis!
exit()
| mit |
Schwittleymani/ECO | src/python/keras_lstm/lstm_wrapper.py | 2 | 4929 | from keras.models import Sequential
from keras.models import load_model, model_from_json
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import RMSprop
import numpy as np
import pickle
import os
class LSTMWrapper(object):
def __init__(self, maxlen, step):
self.maxlen = maxlen
self.step = step
self.name = None
def load(self, path):
text = open(path).read().lower()
print('corpus length:', len(text))
self.chars = sorted(list(set(text)))
print('total chars:', len(self.chars))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
# cut the text in semi-redundant sequences of maxlen characters
sentences = []
next_chars = []
for i in range(0, len(text) - self.maxlen, self.step):
sentences.append(text[i: i + self.maxlen])
next_chars.append(text[i + self.maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
self.X = np.zeros((len(sentences), self.maxlen, len(self.chars)), dtype=np.bool)
self.y = np.zeros((len(sentences), len(self.chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
self.X[i, t, self.char_indices[char]] = 1
self.y[i, self.char_indices[next_chars[i]]] = 1
# build the model: a single LSTM
print('Build model...')
self.model = Sequential()
self.model.add(LSTM(128, input_shape=(self.maxlen, len(self.chars))))
self.model.add(Dense(len(self.chars)))
self.model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def train(self, iterations, epochs, model_save_path=None, save_every=0):
# train the model
        for iteration in range(iterations):
print('Iteration ' + str(iteration) + ' / ' + str(iterations))
self.model.fit(self.X, self.y, batch_size=128, nb_epoch=epochs)
if save_every != 0 and iteration % save_every == 0:
output_path = model_save_path + '_iteration' + str(iteration)
self.save_model(output_path)
def sample(self, diversity, seed, output_length):
output_text = seed.rjust(self.maxlen)
input_text = seed.rjust(self.maxlen)
for i in range(output_length):
x = np.zeros((1, self.maxlen, len(self.chars)))
for t, char in enumerate(input_text):
x[0, t, self.char_indices[char]] = 1.
preds = self.model.predict(x, verbose=0)[0]
next_index = self.__sample_character(preds, diversity)
next_char = self.indices_char[next_index]
input_text = input_text[1:] + next_char
output_text += next_char
return output_text
def save_model(self, path):
directory, filename = os.path.split(path)
if not os.path.exists(directory):
os.makedirs(directory)
print('Saving model to' + path)
model_json = self.model.to_json()
with open(path + '.json', 'w') as json_file:
json_file.write(model_json)
self.model.save_weights(path)
with open(path + '_chars.pkl', 'wb') as file:
pickle.dump(self.chars, file, pickle.HIGHEST_PROTOCOL)
with open(path + '_char_indices.pkl', 'wb') as file:
pickle.dump(self.char_indices, file, pickle.HIGHEST_PROTOCOL)
with open(path + '_indices_char.pkl', 'wb') as file:
pickle.dump(self.indices_char, file, pickle.HIGHEST_PROTOCOL)
def load_model(self, path):
print('Loading model from ' + path)
json_file = open(path + '.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
self.model = model_from_json(loaded_model_json)
self.model.load_weights(path)
optimizer = RMSprop(lr=0.01)
self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
with open(path + '_chars.pkl', 'rb') as file:
self.chars = pickle.load(file)
with open(path + '_char_indices.pkl', 'rb') as file:
self.char_indices = pickle.load(file)
with open(path + '_indices_char.pkl', 'rb') as file:
self.indices_char = pickle.load(file)
def __sample_character(self, preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
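# Minimal usage sketch (illustrative; file names and hyperparameters are
# assumptions; the seed must be no longer than `maxlen` and may only use
# characters present in the training corpus):
#
#   wrapper = LSTMWrapper(maxlen=40, step=3)
#   wrapper.load('corpus.txt')
#   wrapper.train(iterations=10, epochs=1,
#                 model_save_path='models/lstm', save_every=5)
#   print(wrapper.sample(diversity=0.5, seed='once upon a time',
#                        output_length=200))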
| apache-2.0 |
wilvk/ansible | lib/ansible/modules/windows/win_lineinfile.py | 46 | 7167 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_lineinfile
author: "Brian Lloyd <[email protected]>"
short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression.
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in a file only.
version_added: "2.0"
options:
path:
required: true
aliases: [ dest, destfile, name ]
description:
- The path of the file to modify.
- Note that the Windows path delimiter C(\) must be escaped as C(\\) when the line is double quoted.
- Before 2.3 this option was only usable as I(dest), I(destfile) and I(name).
regexp:
required: false
description:
- >
The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found
will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions;
see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx).
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the line should be there or not.
line:
required: false
description:
- Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
backrefs:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
- If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter.
insertafter:
required: false
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
- If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
description:
- Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available;
C(BOF) for inserting the line at the beginning of the file.
- If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
choices: [ "yes", "no" ]
default: "no"
description:
- Used with C(state=present). If specified, the file will be created if it does not already exist. By default it will fail if the file is missing.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly.
validate:
required: false
description:
- Validation to run before copying into place. Use %s in the command to indicate the current file to validate.
- The command is passed securely so shell features like expansion and pipes won't work.
default: None
encoding:
required: false
default: "auto"
description:
- Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause
the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding.
- >
An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method - see
U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx).
- This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a
specific encoding, the default encoding (UTF-8, no BOM) will be used.
newline:
required: false
description:
- >
Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated
line separator will be used for file output regardless of the original line separator that appears in the input file.
choices: [ "windows", "unix" ]
default: "windows"
notes:
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
'''
EXAMPLES = r'''
# Before 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
- win_lineinfile:
path: C:\temp\example.conf
regexp: '^name='
line: 'name=JohnDoe'
- win_lineinfile:
path: C:\temp\example.conf
regexp: '^name='
state: absent
- win_lineinfile:
path: C:\temp\example.conf
regexp: '^127\.0\.0\.1'
line: '127.0.0.1 localhost'
- win_lineinfile:
path: C:\temp\httpd.conf
regexp: '^Listen '
insertafter: '^#Listen '
line: Listen 8080
- win_lineinfile:
path: C:\temp\services
regexp: '^# port for http'
insertbefore: '^www.*80/tcp'
line: '# port for http by default'
# Create file if it doesn't exist with a specific encoding
- win_lineinfile:
path: C:\temp\utf16.txt
create: yes
encoding: utf-16
line: This is a utf-16 encoded file
# Add a line to a file and ensure the resulting file uses unix line separators
- win_lineinfile:
path: C:\temp\testfile.txt
line: Line added to file
newline: unix
# Update a line using backrefs
- win_lineinfile:
path: C:\temp\example.conf
backrefs: yes
regexp: '(^name=)'
line: '$1JohnDoe'
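# Validate the change before writing it to the target file. Sketch only:
# the validation script path is a placeholder; %s is replaced with the
# file to check.
- win_lineinfile:
    path: C:\temp\example.conf
    line: 'key=value'
    validate: 'powershell.exe -File C:\scripts\check-conf.ps1 %s'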
'''
| gpl-3.0 |
Akasurde/pytest | testing/test_doctest.py | 10 | 12556 | from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
import py
class TestDoctests:
def test_collect_testtextfile(self, testdir):
w = testdir.maketxtfile(whatever="")
checkfile = testdir.maketxtfile(test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
""")
for x in (testdir.tmpdir, checkfile):
#print "checking that %s returns custom items" % (x,)
items, reprec = testdir.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestTextfile)
items, reprec = testdir.inline_genitems(w)
assert len(items) == 1
def test_collect_module_empty(self, testdir):
path = testdir.makepyfile(whatever="#")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 0
def test_collect_module_single_modulelevel_doctest(self, testdir):
path = testdir.makepyfile(whatever='""">>> pass"""')
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 1
assert isinstance(items[0], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
def test_collect_module_two_doctest_one_modulelevel(self, testdir):
path = testdir.makepyfile(whatever="""
'>>> x = None'
def my_func():
">>> magic = 42 "
""")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_collect_module_two_doctest_no_modulelevel(self, testdir):
path = testdir.makepyfile(whatever="""
'# Empty'
def my_func():
">>> magic = 42 "
def unuseful():
'''
# This is a function
# >>> # it doesn't have any doctest
'''
def another():
'''
# This is another function
>>> import os # this one does have a doctest
'''
""")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p,
'--doctest-modules')
assert len(items) == 2
assert isinstance(items[0], DoctestItem)
assert isinstance(items[1], DoctestItem)
assert isinstance(items[0].parent, DoctestModule)
assert items[0].parent is items[1].parent
def test_simple_doctestfile(self, testdir):
p = testdir.maketxtfile(test_doc="""
>>> x = 1
>>> x == 1
False
""")
reprec = testdir.inline_run(p, )
reprec.assertoutcome(failed=1)
def test_new_pattern(self, testdir):
        p = testdir.maketxtfile(xdoc="""
>>> x = 1
>>> x == 1
False
""")
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1)
def test_doctest_unexpected_exception(self, testdir):
testdir.maketxtfile("""
>>> i = 0
>>> 0 / i
2
""")
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines([
"*unexpected_exception*",
"*>>> i = 0*",
"*>>> 0 / i*",
"*UNEXPECTED*ZeroDivision*",
])
def test_doctest_linedata_missing(self, testdir):
testdir.tmpdir.join('hello.py').write(py.code.Source("""
class Fun(object):
@property
def test(self):
'''
>>> a = 1
>>> 1/0
'''
"""))
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines([
"*hello*",
"*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*",
"*1/0*",
"*UNEXPECTED*ZeroDivision*",
"*1 failed*",
])
def test_doctest_unex_importerror(self, testdir):
testdir.tmpdir.join("hello.py").write(py.code.Source("""
import asdalsdkjaslkdjasd
"""))
testdir.maketxtfile("""
>>> import hello
>>>
""")
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines([
"*>>> import hello",
"*UNEXPECTED*ImportError*",
"*import asdals*",
])
def test_doctestmodule(self, testdir):
p = testdir.makepyfile("""
'''
>>> x = 1
>>> x == 1
False
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctestmodule_external_and_issue116(self, testdir):
p = testdir.mkpydir("hello")
p.join("__init__.py").write(py.code.Source("""
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
"""))
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines([
'004 *>>> i = 0',
'005 *>>> i + 1',
'*Expected:',
"* 2",
"*Got:",
"* 1",
"*:5: DocTestFailure"
])
def test_txtfile_failing(self, testdir):
p = testdir.maketxtfile("""
>>> i = 0
>>> i + 1
2
""")
result = testdir.runpytest(p, "-s")
result.stdout.fnmatch_lines([
'001 >>> i = 0',
'002 >>> i + 1',
'Expected:',
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure"
])
def test_txtfile_with_fixtures(self, testdir):
p = testdir.maketxtfile("""
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
""")
reprec = testdir.inline_run(p, )
reprec.assertoutcome(passed=1)
def test_txtfile_with_usefixtures_in_ini(self, testdir):
testdir.makeini("""
[pytest]
usefixtures = myfixture
""")
testdir.makeconftest("""
import pytest
@pytest.fixture
def myfixture(monkeypatch):
monkeypatch.setenv("HELLO", "WORLD")
""")
p = testdir.maketxtfile("""
>>> import os
>>> os.environ["HELLO"]
'WORLD'
""")
reprec = testdir.inline_run(p, )
reprec.assertoutcome(passed=1)
def test_doctestmodule_with_fixtures(self, testdir):
p = testdir.makepyfile("""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_doctestmodule_three_tests(self, testdir):
p = testdir.makepyfile("""
'''
>>> dir = getfixture('tmpdir')
>>> type(dir).__name__
'LocalPath'
'''
def my_func():
'''
>>> magic = 42
>>> magic - 42
0
'''
def unuseful():
pass
def another():
'''
>>> import os
>>> os is os
True
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=3)
def test_doctestmodule_two_tests_one_fail(self, testdir):
p = testdir.makepyfile("""
class MyClass:
def bad_meth(self):
'''
>>> magic = 42
>>> magic
0
'''
def nice_meth(self):
'''
>>> magic = 42
>>> magic - 42
0
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=1)
def test_ignored_whitespace(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
""")
p = testdir.makepyfile("""
class MyClass:
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS
""")
p = testdir.makepyfile("""
class MyClass:
'''
>>> a = "foo "
>>> print(a)
foo
'''
pass
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1, passed=0)
def test_ignored_whitespace_glob(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
""")
p = testdir.maketxtfile(xdoc="""
>>> a = "foo "
>>> print(a)
foo
""")
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(passed=1)
def test_non_ignored_whitespace_glob(self, testdir):
testdir.makeini("""
[pytest]
doctest_optionflags = ELLIPSIS
""")
p = testdir.maketxtfile(xdoc="""
>>> a = "foo "
>>> print(a)
foo
""")
reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
reprec.assertoutcome(failed=1, passed=0)
def test_ignore_import_errors_on_doctest(self, testdir):
p = testdir.makepyfile("""
import asdf
def add_one(x):
'''
>>> add_one(1)
2
'''
return x + 1
""")
reprec = testdir.inline_run(p, "--doctest-modules",
"--doctest-ignore-import-errors")
reprec.assertoutcome(skipped=1, failed=1, passed=0)
def test_junit_report_for_doctest(self, testdir):
"""
#713: Fix --junit-xml option when used with --doctest-modules.
"""
p = testdir.makepyfile("""
def foo():
'''
>>> 1 + 1
3
'''
pass
""")
reprec = testdir.inline_run(p, "--doctest-modules",
"--junit-xml=junit.xml")
reprec.assertoutcome(failed=1)
def test_doctest_module_session_fixture(self, testdir):
"""Test that session fixtures are initialized for doctest modules (#768)
"""
# session fixture which changes some global data, which will
# be accessed by doctests in a module
testdir.makeconftest("""
import pytest
import sys
@pytest.yield_fixture(autouse=True, scope='session')
def myfixture():
assert not hasattr(sys, 'pytest_session_data')
sys.pytest_session_data = 1
yield
del sys.pytest_session_data
""")
testdir.makepyfile(foo="""
import sys
def foo():
'''
>>> assert sys.pytest_session_data == 1
'''
def bar():
'''
>>> assert sys.pytest_session_data == 1
'''
""")
result = testdir.runpytest("--doctest-modules")
result.stdout.fnmatch_lines('*2 passed*')
| mit |
rubgombar1/sharing-cars | sharingcars/common/migrations/0001_initial.py | 1 | 4372 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-05-02 14:13
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import sharingcars.helpers.User
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.CreateModel(
name='Actor',
fields=[
('user_account', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=256)),
('comment', models.TextField()),
('rating', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(10)])),
],
),
migrations.CreateModel(
name='Folder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=256)),
('body', models.TextField(max_length=256)),
('creationMoment', models.DateTimeField(auto_now_add=True)),
('folder', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='common.Folder')),
],
),
migrations.CreateModel(
name='Administrator',
fields=[
('actor_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='common.Actor')),
],
bases=('common.actor',),
),
migrations.CreateModel(
name='User',
fields=[
('actor_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='common.Actor')),
('name', models.CharField(max_length=256)),
('surnames', models.CharField(max_length=256)),
('city', models.CharField(max_length=256)),
('birthdate', models.DateField()),
('phone', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator('^\\+?\\d+$')])),
('searchinCar', models.BooleanField(default=False)),
('photo', models.ImageField(default='default', null=True, upload_to=sharingcars.helpers.User.path_generator)),
],
bases=('common.actor',),
),
migrations.AddField(
model_name='message',
name='recipient',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipient', to='common.Actor'),
),
migrations.AddField(
model_name='message',
name='sender',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to='common.Actor'),
),
migrations.AddField(
model_name='folder',
name='actor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='common.Actor'),
),
migrations.AddField(
model_name='comment',
name='evaluated',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='evaluated', to='common.User'),
),
migrations.AddField(
model_name='comment',
name='referrer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='referrer', to='common.User'),
),
]
| gpl-3.0 |
netzary/Kaline | ldapdb/models/__init__.py | 2 | 1719 | # -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2011, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Bolloré telecom nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ldapdb.models.base import Model
| mit |
bbaumer/ansible-modules-core | system/cron.py | 39 | 17136 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2012, Dane Summers <[email protected]>
# (c) 2013, Mike Grozak <[email protected]>
# (c) 2013, Patrick Callahan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Cron Plugin: The goal of this plugin is to provide an idempotent method for
# setting up cron jobs on a host. The script will play well with other manually
# entered crons. Each cron job entered will be preceded with a comment
# describing the job so that it can be found later, which is required to be
# present in order for this plugin to find/modify the job.
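#
# For example (illustrative), a job named "check dirs" that runs at minute 0
# of hours 5 and 2 is stored in the crontab as:
#   #Ansible: check dirs
#   0 5,2 * * * ls -alh > /dev/null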
#
# This module is based on python-crontab by Martin Owens.
#
DOCUMENTATION = """
---
module: cron
short_description: Manage cron.d and crontab entries.
description:
- Use this module to manage crontab entries. This module allows you to create named
crontab entries, update, or delete them.
- 'The module includes one line with the description of the crontab entry C("#Ansible: <name>")
corresponding to the "name" passed to the module, which is used by future ansible/module calls
to find/check the state. The "name" parameter should be unique, and changing the "name" value
will result in a new cron task being created (or a different one being removed)'
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry.
default: null
required: false
user:
description:
- The specific user whose crontab should be modified.
required: false
default: root
job:
description:
- The command to execute. Required if state=present.
required: false
default: null
state:
description:
- Whether to ensure the job is present or absent.
required: false
default: present
choices: [ "present", "absent" ]
cron_file:
description:
- If specified, uses this file in cron.d instead of an individual user's crontab.
required: false
default: null
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
required: false
choices: [ "yes", "no" ]
default: no
minute:
description:
- Minute when the job should run ( 0-59, *, */2, etc )
required: false
default: "*"
hour:
description:
- Hour when the job should run ( 0-23, *, */2, etc )
required: false
default: "*"
day:
description:
- Day of the month the job should run ( 1-31, *, */2, etc )
required: false
default: "*"
aliases: [ "dom" ]
month:
description:
- Month of the year the job should run ( 1-12, *, */2, etc )
required: false
default: "*"
weekday:
description:
- Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc )
required: false
default: "*"
aliases: [ "dow" ]
reboot:
description:
- If the job should be run at reboot. This option is deprecated. Users should use special_time.
version_added: "1.0"
required: false
default: "no"
choices: [ "yes", "no" ]
special_time:
description:
- Special time specification nickname.
version_added: "1.3"
required: false
default: null
choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ]
requirements:
- cron
author: "Dane Summers (@dsummersl)"
updates: [ 'Mike Grozak', 'Patrick Callahan' ]
"""
EXAMPLES = '''
# Ensure a job that runs at 2 and 5 exists.
# Creates an entry like "0 5,2 * * ls -alh > /dev/null"
- cron: name="check dirs" minute="0" hour="5,2" job="ls -alh > /dev/null"
# Ensure an old job is no longer present. Removes any job that is prefixed
# by "#Ansible: an old job" from the crontab
- cron: name="an old job" state=absent
# Creates an entry like "@reboot /some/job.sh"
- cron: name="a job for reboot" special_time=reboot job="/some/job.sh"
# Creates a cron file under /etc/cron.d
- cron: name="yum autoupdate" weekday="2" minute=0 hour=12
user="root" job="YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
cron_file=ansible_yum-autoupdate
# Removes a cron file from under /etc/cron.d
- cron: name="yum autoupdate" cron_file=ansible_yum-autoupdate state=absent
'''
import os
import re
import sys
import syslog
import tempfile
import platform
import pipes
CRONCMD = "/usr/bin/crontab"
class CronTabError(Exception):
pass
class CronTab(object):
"""
CronTab object to write time based crontab file
user - the user of the crontab (defaults to root)
cron_file - a cron file under /etc/cron.d
"""
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
self.root = (os.getuid() == 0)
self.lines = None
self.ansible = "#Ansible: "
# select whether we dump additional debug info through syslog
self.syslogging = False
if cron_file:
self.cron_file = '/etc/cron.d/%s' % cron_file
else:
self.cron_file = None
self.read()
def read(self):
# Read in the crontab from the system
self.lines = []
if self.cron_file:
# read the cronfile
try:
f = open(self.cron_file, 'r')
self.lines = f.read().splitlines()
f.close()
except IOError, e:
# cron file does not exist
return
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
else:
# using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronTabError("Unable to read crontab")
lines = out.splitlines()
count = 0
for l in lines:
if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
not re.match( r'# \(/tmp/.*installed on.*\)', l) and
not re.match( r'# \(.*version.*\)', l)):
self.lines.append(l)
count += 1
def log_message(self, message):
if self.syslogging:
syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message)
def is_empty(self):
        return len(self.lines) == 0
def write(self, backup_file=None):
"""
Write the crontab to the system. Saves all information.
"""
if backup_file:
fileh = open(backup_file, 'w')
elif self.cron_file:
fileh = open(self.cron_file, 'w')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
fileh = os.fdopen(filed, 'w')
fileh.write(self.render())
fileh.close()
# return if making a backup
if backup_file:
return
# Add the entire crontab back to the user crontab
if not self.cron_file:
# quoting shell args for now but really this should be two non-shell calls. FIXME
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
self.module.fail_json(msg=err)
def add_job(self, name, job):
# Add the comment
self.lines.append("%s%s" % (self.ansible, name))
# Add the job
self.lines.append("%s" % (job))
def update_job(self, name, job):
return self._update_job(name, job, self.do_add_job)
def do_add_job(self, lines, comment, job):
lines.append(comment)
lines.append("%s" % (job))
def remove_job(self, name):
return self._update_job(name, "", self.do_remove_job)
def do_remove_job(self, lines, comment, job):
return None
def remove_job_file(self):
try:
os.unlink(self.cron_file)
return True
except OSError, e:
# cron file does not exist
return False
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
def find_job(self, name):
comment = None
for l in self.lines:
if comment is not None:
if comment == name:
return [comment, l]
else:
comment = None
elif re.match( r'%s' % self.ansible, l):
comment = re.sub( r'%s' % self.ansible, '', l)
return []
    def get_cron_job(self, minute, hour, day, month, weekday, job, special):
        if special:
            if self.cron_file:
                return "@%s %s %s" % (special, self.user, job)
            else:
                return "@%s %s" % (special, job)
        if self.cron_file:
            return "%s %s %s %s %s %s %s" % (minute, hour, day, month, weekday, self.user, job)
        return "%s %s %s %s %s %s" % (minute, hour, day, month, weekday, job)
def get_jobnames(self):
jobnames = []
for l in self.lines:
if re.match( r'%s' % self.ansible, l):
jobnames.append(re.sub( r'%s' % self.ansible, '', l))
return jobnames
def _update_job(self, name, job, addlinesfunction):
ansiblename = "%s%s" % (self.ansible, name)
newlines = []
comment = None
for l in self.lines:
if comment is not None:
addlinesfunction(newlines, comment, job)
comment = None
elif l == ansiblename:
comment = l
else:
newlines.append(l)
self.lines = newlines
        return len(newlines) == 0  # TODO add some more error testing
def render(self):
"""
Render this crontab as it would be in the crontab.
"""
crons = []
for cron in self.lines:
crons.append(cron)
result = '\n'.join(crons)
if result and result[-1] not in ['\n', '\r']:
result += '\n'
return result
def _read_user_execute(self):
"""
Returns the command line for reading a crontab
"""
user = ''
if self.user:
if platform.system() == 'SunOS':
return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD))
elif platform.system() == 'AIX':
return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user))
else:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, '-l')
def _write_execute(self, path):
"""
Return the command line for writing a crontab
"""
user = ''
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path))
else:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, pipes.quote(path))
#==================================================
def main():
# The following example playbooks:
#
# - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
#
# - name: do the job
# cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
#
# - name: no job
# cron: name="an old job" state=absent
#
# Would produce:
    # # Ansible: check dirs
    # * 5,2 * * * ls -alh > /dev/null
    # # Ansible: do the job
    # * 5,2 * * * /some/dir/job.sh
module = AnsibleModule(
argument_spec = dict(
name=dict(required=False),
user=dict(required=False),
job=dict(required=False),
cron_file=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
backup=dict(default=False, type='bool'),
minute=dict(default='*'),
hour=dict(default='*'),
day=dict(aliases=['dom'], default='*'),
month=dict(default='*'),
weekday=dict(aliases=['dow'], default='*'),
reboot=dict(required=False, default=False, type='bool'),
special_time=dict(required=False,
default=None,
choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"],
type='str')
),
supports_check_mode = False,
)
name = module.params['name']
user = module.params['user']
job = module.params['job']
cron_file = module.params['cron_file']
state = module.params['state']
backup = module.params['backup']
minute = module.params['minute']
hour = module.params['hour']
day = module.params['day']
month = module.params['month']
weekday = module.params['weekday']
reboot = module.params['reboot']
special_time = module.params['special_time']
do_install = state == 'present'
changed = False
res_args = dict()
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(022)
crontab = CronTab(module, user, cron_file)
if crontab.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'cron instantiated - name: "%s"' % name)
# --- user input validation ---
if (special_time or reboot) and \
(True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
module.fail_json(msg="You must specify time and date fields or special time.")
if cron_file and do_install:
if not user:
module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
if reboot and special_time:
module.fail_json(msg="reboot and special_time are mutually exclusive")
if name is None and do_install:
module.fail_json(msg="You must specify 'name' to install a new cron job")
if job is None and do_install:
module.fail_json(msg="You must specify 'job' to install a new cron job")
if job and name is None and not do_install:
module.fail_json(msg="You must specify 'name' to remove a cron job")
if reboot:
if special_time:
module.fail_json(msg="reboot and special_time are mutually exclusive")
else:
special_time = "reboot"
# if requested make a backup before making a change
if backup:
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file)
if crontab.cron_file and not name and not do_install:
changed = crontab.remove_job_file()
module.exit_json(changed=changed,cron_file=cron_file,state=state)
job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time)
old_job = crontab.find_job(name)
if do_install:
if len(old_job) == 0:
crontab.add_job(name, job)
changed = True
if len(old_job) > 0 and old_job[1] != job:
crontab.update_job(name, job)
changed = True
else:
if len(old_job) > 0:
crontab.remove_job(name)
changed = True
res_args = dict(
jobs = crontab.get_jobnames(), changed = changed
)
if changed:
crontab.write()
# retain the backup only if crontab or cron file have changed
if backup:
if changed:
res_args['backup_file'] = backup_file
else:
os.unlink(backup_file)
if cron_file:
res_args['cron_file'] = cron_file
module.exit_json(**res_args)
# --- should never get here
module.exit_json(msg="Unable to execute cron task.")
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
jiangzhuo/kbengine | kbe/src/lib/python/Lib/test/test_bigmem.py | 123 | 45241 | """Bigmem tests - tests for the 32-bit boundary in containers.
These tests try to exercise the 32-bit boundary that is sometimes, if
rarely, exceeded in practice, but almost never tested. They are really only
meaningful on 64-bit builds on machines with a *lot* of memory, but the
tests are always run, usually with very low memory limits to make sure the
tests themselves don't suffer from bitrot. To run them for real, pass a
high memory limit to regrtest, with the -M option.
"""
from test import support
from test.support import bigmemtest, _1G, _2G, _4G
import unittest
import operator
import sys
import functools
# These tests all use one of the bigmemtest decorators to indicate how much
# memory they use and how much memory they need to be even meaningful. The
# decorators take two arguments: a 'memuse' indicator declaring
# (approximate) bytes per size-unit the test will use (at peak usage), and a
# 'minsize' indicator declaring a minimum *useful* size. A test that
# allocates a bytestring to test various operations near the end will have a
# minsize of at least 2Gb (or it wouldn't reach the 32-bit limit, so the
# test wouldn't be very useful) and a memuse of 1 (one byte per size-unit,
# if it allocates only one big string at a time.)
#
# When run with a memory limit set, both decorators skip tests that need
# more memory than available to be meaningful. The precisionbigmemtest will
# always pass minsize as size, even if there is much more memory available.
# The bigmemtest decorator will scale size upward to fill available memory.
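#
# For example, a test whose peak footprint is one size-byte string plus one
# full copy of it would be declared roughly like this (illustrative sketch):
#
#     @bigmemtest(size=_2G + 10, memuse=2)  # ~2 bytes per size-unit at peak
#     def test_example(self, size):
#         s = b'a' * size       # first allocation: size bytes
#         t = s.upper()         # second allocation: another size bytes
#         self.assertEqual(len(t), size)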
#
# Bigmem testing houserules:
#
# - Try not to allocate too many large objects. It's okay to rely on
# refcounting semantics, and don't forget that 's = create_largestring()'
# doesn't release the old 's' (if it exists) until well after its new
# value has been created. Use 'del s' before the create_largestring call.
#
# - Do *not* compare large objects using assertEqual, assertIn or similar.
# It's a lengthy operation and the errormessage will be utterly useless
# due to its size. To make sure whether a result has the right contents,
# better to use the strip or count methods, or compare meaningful slices.
#
# - Don't forget to test for large indices, offsets and results and such,
# in addition to large sizes. Anything that probes the 32-bit boundary.
#
# - When repeating an object (say, a substring, or a small list) to create
# a large object, make the subobject of a length that is not a power of
# 2. That way, int-wrapping problems are more easily detected.
#
# - Despite the bigmemtest decorator, all tests will actually be called
# with a much smaller number too, in the normal test run (5Kb currently.)
# This is so the tests themselves get frequent testing.
# Consequently, always make all large allocations based on the
# passed-in 'size', and don't rely on the size being very large. Also,
# memuse-per-size should remain sane (less than a few thousand); if your
# test uses more, adjust 'size' upward, instead.
# BEWARE: it seems that one failing test can cause subsequent tests to fail
# as well. I do not know whether it is due to memory fragmentation issues,
# or other specifics of the platform malloc() routine.
ascii_char_size = 1
ucs2_char_size = 2
ucs4_char_size = 4
class BaseStrTest:
def _test_capitalize(self, size):
_ = self.from_latin1
SUBSTR = self.from_latin1(' abc def ghi')
s = _('-') * size + SUBSTR
caps = s.capitalize()
self.assertEqual(caps[-len(SUBSTR):],
SUBSTR.capitalize())
self.assertEqual(caps.lstrip(_('-')), SUBSTR)
@bigmemtest(size=_2G + 10, memuse=1)
def test_center(self, size):
SUBSTR = self.from_latin1(' abc def ghi')
s = SUBSTR.center(size)
self.assertEqual(len(s), size)
lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
if len(s) % 2:
lpadsize += 1
self.assertEqual(s[lpadsize:-rpadsize], SUBSTR)
self.assertEqual(s.strip(), SUBSTR.strip())
@bigmemtest(size=_2G, memuse=2)
def test_count(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = _('.') * size + SUBSTR
self.assertEqual(s.count(_('.')), size)
s += _('.')
self.assertEqual(s.count(_('.')), size + 1)
self.assertEqual(s.count(_(' ')), 3)
self.assertEqual(s.count(_('i')), 1)
self.assertEqual(s.count(_('j')), 0)
@bigmemtest(size=_2G, memuse=2)
def test_endswith(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = _('-') * size + SUBSTR
self.assertTrue(s.endswith(SUBSTR))
self.assertTrue(s.endswith(s))
s2 = _('...') + s
self.assertTrue(s2.endswith(s))
self.assertFalse(s.endswith(_('a') + SUBSTR))
self.assertFalse(SUBSTR.endswith(s))
@bigmemtest(size=_2G + 10, memuse=2)
def test_expandtabs(self, size):
_ = self.from_latin1
s = _('-') * size
tabsize = 8
self.assertTrue(s.expandtabs() == s)
del s
slen, remainder = divmod(size, tabsize)
s = _(' \t') * slen
s = s.expandtabs(tabsize)
self.assertEqual(len(s), size - remainder)
self.assertEqual(len(s.strip(_(' '))), 0)
@bigmemtest(size=_2G, memuse=2)
def test_find(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEqual(s.find(_(' ')), 0)
self.assertEqual(s.find(SUBSTR), 0)
self.assertEqual(s.find(_(' '), sublen), sublen + size)
self.assertEqual(s.find(SUBSTR, len(SUBSTR)), sublen + size)
self.assertEqual(s.find(_('i')), SUBSTR.find(_('i')))
self.assertEqual(s.find(_('i'), sublen),
sublen + size + SUBSTR.find(_('i')))
self.assertEqual(s.find(_('i'), size),
sublen + size + SUBSTR.find(_('i')))
self.assertEqual(s.find(_('j')), -1)
@bigmemtest(size=_2G, memuse=2)
def test_index(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEqual(s.index(_(' ')), 0)
self.assertEqual(s.index(SUBSTR), 0)
self.assertEqual(s.index(_(' '), sublen), sublen + size)
self.assertEqual(s.index(SUBSTR, sublen), sublen + size)
self.assertEqual(s.index(_('i')), SUBSTR.index(_('i')))
self.assertEqual(s.index(_('i'), sublen),
sublen + size + SUBSTR.index(_('i')))
self.assertEqual(s.index(_('i'), size),
sublen + size + SUBSTR.index(_('i')))
self.assertRaises(ValueError, s.index, _('j'))
@bigmemtest(size=_2G, memuse=2)
def test_isalnum(self, size):
_ = self.from_latin1
SUBSTR = _('123456')
s = _('a') * size + SUBSTR
self.assertTrue(s.isalnum())
s += _('.')
self.assertFalse(s.isalnum())
@bigmemtest(size=_2G, memuse=2)
def test_isalpha(self, size):
_ = self.from_latin1
SUBSTR = _('zzzzzzz')
s = _('a') * size + SUBSTR
self.assertTrue(s.isalpha())
s += _('.')
self.assertFalse(s.isalpha())
@bigmemtest(size=_2G, memuse=2)
def test_isdigit(self, size):
_ = self.from_latin1
SUBSTR = _('123456')
s = _('9') * size + SUBSTR
self.assertTrue(s.isdigit())
s += _('z')
self.assertFalse(s.isdigit())
@bigmemtest(size=_2G, memuse=2)
def test_islower(self, size):
_ = self.from_latin1
chars = _(''.join(
chr(c) for c in range(255) if not chr(c).isupper()))
repeats = size // len(chars) + 2
s = chars * repeats
self.assertTrue(s.islower())
s += _('A')
self.assertFalse(s.islower())
@bigmemtest(size=_2G, memuse=2)
def test_isspace(self, size):
_ = self.from_latin1
whitespace = _(' \f\n\r\t\v')
repeats = size // len(whitespace) + 2
s = whitespace * repeats
self.assertTrue(s.isspace())
s += _('j')
self.assertFalse(s.isspace())
@bigmemtest(size=_2G, memuse=2)
def test_istitle(self, size):
_ = self.from_latin1
SUBSTR = _('123456')
s = _('').join([_('A'), _('a') * size, SUBSTR])
self.assertTrue(s.istitle())
s += _('A')
self.assertTrue(s.istitle())
s += _('aA')
self.assertFalse(s.istitle())
@bigmemtest(size=_2G, memuse=2)
def test_isupper(self, size):
_ = self.from_latin1
chars = _(''.join(
chr(c) for c in range(255) if not chr(c).islower()))
repeats = size // len(chars) + 2
s = chars * repeats
self.assertTrue(s.isupper())
s += _('a')
self.assertFalse(s.isupper())
@bigmemtest(size=_2G, memuse=2)
def test_join(self, size):
_ = self.from_latin1
s = _('A') * size
x = s.join([_('aaaaa'), _('bbbbb')])
self.assertEqual(x.count(_('a')), 5)
self.assertEqual(x.count(_('b')), 5)
self.assertTrue(x.startswith(_('aaaaaA')))
self.assertTrue(x.endswith(_('Abbbbb')))
@bigmemtest(size=_2G + 10, memuse=1)
def test_ljust(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = SUBSTR.ljust(size)
self.assertTrue(s.startswith(SUBSTR + _(' ')))
self.assertEqual(len(s), size)
self.assertEqual(s.strip(), SUBSTR.strip())
@bigmemtest(size=_2G + 10, memuse=2)
def test_lower(self, size):
_ = self.from_latin1
s = _('A') * size
s = s.lower()
self.assertEqual(len(s), size)
self.assertEqual(s.count(_('a')), size)
@bigmemtest(size=_2G + 10, memuse=1)
def test_lstrip(self, size):
_ = self.from_latin1
SUBSTR = _('abc def ghi')
s = SUBSTR.rjust(size)
self.assertEqual(len(s), size)
self.assertEqual(s.lstrip(), SUBSTR.lstrip())
del s
s = SUBSTR.ljust(size)
self.assertEqual(len(s), size)
# Type-specific optimization
if isinstance(s, (str, bytes)):
stripped = s.lstrip()
self.assertTrue(stripped is s)
@bigmemtest(size=_2G + 10, memuse=2)
def test_replace(self, size):
_ = self.from_latin1
replacement = _('a')
s = _(' ') * size
s = s.replace(_(' '), replacement)
self.assertEqual(len(s), size)
self.assertEqual(s.count(replacement), size)
s = s.replace(replacement, _(' '), size - 4)
self.assertEqual(len(s), size)
self.assertEqual(s.count(replacement), 4)
self.assertEqual(s[-10:], _(' aaaa'))
@bigmemtest(size=_2G, memuse=2)
def test_rfind(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEqual(s.rfind(_(' ')), sublen + size + SUBSTR.rfind(_(' ')))
self.assertEqual(s.rfind(SUBSTR), sublen + size)
self.assertEqual(s.rfind(_(' '), 0, size), SUBSTR.rfind(_(' ')))
self.assertEqual(s.rfind(SUBSTR, 0, sublen + size), 0)
self.assertEqual(s.rfind(_('i')), sublen + size + SUBSTR.rfind(_('i')))
self.assertEqual(s.rfind(_('i'), 0, sublen), SUBSTR.rfind(_('i')))
self.assertEqual(s.rfind(_('i'), 0, sublen + size),
SUBSTR.rfind(_('i')))
self.assertEqual(s.rfind(_('j')), -1)
@bigmemtest(size=_2G, memuse=2)
def test_rindex(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
sublen = len(SUBSTR)
s = _('').join([SUBSTR, _('-') * size, SUBSTR])
self.assertEqual(s.rindex(_(' ')),
sublen + size + SUBSTR.rindex(_(' ')))
self.assertEqual(s.rindex(SUBSTR), sublen + size)
self.assertEqual(s.rindex(_(' '), 0, sublen + size - 1),
SUBSTR.rindex(_(' ')))
self.assertEqual(s.rindex(SUBSTR, 0, sublen + size), 0)
self.assertEqual(s.rindex(_('i')),
sublen + size + SUBSTR.rindex(_('i')))
self.assertEqual(s.rindex(_('i'), 0, sublen), SUBSTR.rindex(_('i')))
self.assertEqual(s.rindex(_('i'), 0, sublen + size),
SUBSTR.rindex(_('i')))
self.assertRaises(ValueError, s.rindex, _('j'))
@bigmemtest(size=_2G + 10, memuse=1)
def test_rjust(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
        s = SUBSTR.rjust(size)
        self.assertTrue(s.endswith(_(' ') + SUBSTR))
self.assertEqual(len(s), size)
self.assertEqual(s.strip(), SUBSTR.strip())
@bigmemtest(size=_2G + 10, memuse=1)
def test_rstrip(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = SUBSTR.ljust(size)
self.assertEqual(len(s), size)
self.assertEqual(s.rstrip(), SUBSTR.rstrip())
del s
s = SUBSTR.rjust(size)
self.assertEqual(len(s), size)
# Type-specific optimization
if isinstance(s, (str, bytes)):
stripped = s.rstrip()
self.assertTrue(stripped is s)
# The test takes about size bytes to build a string, and then about
# sqrt(size) substrings of sqrt(size) in size and a list to
# hold sqrt(size) items. It's close but just over 2x size.
@bigmemtest(size=_2G, memuse=2.1)
def test_split_small(self, size):
_ = self.from_latin1
# Crudely calculate an estimate so that the result of s.split won't
# take up an inordinate amount of memory
chunksize = int(size ** 0.5 + 2)
SUBSTR = _('a') + _(' ') * chunksize
s = SUBSTR * chunksize
l = s.split()
self.assertEqual(len(l), chunksize)
expected = _('a')
for item in l:
self.assertEqual(item, expected)
del l
l = s.split(_('a'))
self.assertEqual(len(l), chunksize + 1)
expected = _(' ') * chunksize
for item in filter(None, l):
self.assertEqual(item, expected)
# Allocates a string of twice size (and briefly two) and a list of
# size. Because of internal affairs, the s.split() call produces a
# list of size times the same one-character string, so we only
# suffer for the list size. (Otherwise, it'd cost another 48 times
# size in bytes!) Nevertheless, a list of size takes
# 8*size bytes.
@bigmemtest(size=_2G + 5, memuse=2 * ascii_char_size + 8)
def test_split_large(self, size):
_ = self.from_latin1
s = _(' a') * size + _(' ')
l = s.split()
self.assertEqual(len(l), size)
self.assertEqual(set(l), set([_('a')]))
del l
l = s.split(_('a'))
self.assertEqual(len(l), size + 1)
self.assertEqual(set(l), set([_(' ')]))
@bigmemtest(size=_2G, memuse=2.1)
def test_splitlines(self, size):
_ = self.from_latin1
# Crudely calculate an estimate so that the result of s.split won't
# take up an inordinate amount of memory
chunksize = int(size ** 0.5 + 2) // 2
SUBSTR = _(' ') * chunksize + _('\n') + _(' ') * chunksize + _('\r\n')
s = SUBSTR * (chunksize * 2)
l = s.splitlines()
self.assertEqual(len(l), chunksize * 4)
expected = _(' ') * chunksize
for item in l:
self.assertEqual(item, expected)
@bigmemtest(size=_2G, memuse=2)
def test_startswith(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi')
s = _('-') * size + SUBSTR
self.assertTrue(s.startswith(s))
self.assertTrue(s.startswith(_('-') * size))
self.assertFalse(s.startswith(SUBSTR))
@bigmemtest(size=_2G, memuse=1)
def test_strip(self, size):
_ = self.from_latin1
SUBSTR = _(' abc def ghi ')
s = SUBSTR.rjust(size)
self.assertEqual(len(s), size)
self.assertEqual(s.strip(), SUBSTR.strip())
del s
s = SUBSTR.ljust(size)
self.assertEqual(len(s), size)
self.assertEqual(s.strip(), SUBSTR.strip())
def _test_swapcase(self, size):
_ = self.from_latin1
SUBSTR = _("aBcDeFG12.'\xa9\x00")
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.swapcase()
self.assertEqual(len(s), sublen * repeats)
self.assertEqual(s[:sublen * 3], SUBSTR.swapcase() * 3)
self.assertEqual(s[-sublen * 3:], SUBSTR.swapcase() * 3)
def _test_title(self, size):
_ = self.from_latin1
SUBSTR = _('SpaaHAaaAaham')
s = SUBSTR * (size // len(SUBSTR) + 2)
s = s.title()
self.assertTrue(s.startswith((SUBSTR * 3).title()))
self.assertTrue(s.endswith(SUBSTR.lower() * 3))
@bigmemtest(size=_2G, memuse=2)
def test_translate(self, size):
_ = self.from_latin1
SUBSTR = _('aZz.z.Aaz.')
trans = bytes.maketrans(b'.aZ', b'-!$')
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.translate(trans)
self.assertEqual(len(s), repeats * sublen)
self.assertEqual(s[:sublen], SUBSTR.translate(trans))
self.assertEqual(s[-sublen:], SUBSTR.translate(trans))
self.assertEqual(s.count(_('.')), 0)
self.assertEqual(s.count(_('!')), repeats * 2)
self.assertEqual(s.count(_('z')), repeats * 3)
@bigmemtest(size=_2G + 5, memuse=2)
def test_upper(self, size):
_ = self.from_latin1
s = _('a') * size
s = s.upper()
self.assertEqual(len(s), size)
self.assertEqual(s.count(_('A')), size)
@bigmemtest(size=_2G + 20, memuse=1)
def test_zfill(self, size):
_ = self.from_latin1
SUBSTR = _('-568324723598234')
s = SUBSTR.zfill(size)
self.assertTrue(s.endswith(_('0') + SUBSTR[1:]))
self.assertTrue(s.startswith(_('-0')))
self.assertEqual(len(s), size)
self.assertEqual(s.count(_('0')), size - len(SUBSTR))
# This test is meaningful even with size < 2G, as long as the
# doubled string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(size=_1G + 2, memuse=3)
def test_concat(self, size):
_ = self.from_latin1
s = _('.') * size
self.assertEqual(len(s), size)
s = s + s
self.assertEqual(len(s), size * 2)
self.assertEqual(s.count(_('.')), size * 2)
# This test is meaningful even with size < 2G, as long as the
# repeated string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(size=_1G + 2, memuse=3)
def test_repeat(self, size):
_ = self.from_latin1
s = _('.') * size
self.assertEqual(len(s), size)
s = s * 2
self.assertEqual(len(s), size * 2)
self.assertEqual(s.count(_('.')), size * 2)
@bigmemtest(size=_2G + 20, memuse=2)
def test_slice_and_getitem(self, size):
_ = self.from_latin1
SUBSTR = _('0123456789')
sublen = len(SUBSTR)
s = SUBSTR * (size // sublen)
stepsize = len(s) // 100
stepsize = stepsize - (stepsize % sublen)
for i in range(0, len(s) - stepsize, stepsize):
self.assertEqual(s[i], SUBSTR[0])
self.assertEqual(s[i:i + sublen], SUBSTR)
self.assertEqual(s[i:i + sublen:2], SUBSTR[::2])
if i > 0:
self.assertEqual(s[i + sublen - 1:i - 1:-3],
SUBSTR[sublen::-3])
# Make sure we do some slicing and indexing near the end of the
# string, too.
self.assertEqual(s[len(s) - 1], SUBSTR[-1])
self.assertEqual(s[-1], SUBSTR[-1])
self.assertEqual(s[len(s) - 10], SUBSTR[0])
self.assertEqual(s[-sublen], SUBSTR[0])
self.assertEqual(s[len(s):], _(''))
self.assertEqual(s[len(s) - 1:], SUBSTR[-1:])
self.assertEqual(s[-1:], SUBSTR[-1:])
self.assertEqual(s[len(s) - sublen:], SUBSTR)
self.assertEqual(s[-sublen:], SUBSTR)
self.assertEqual(len(s[:]), len(s))
self.assertEqual(len(s[:len(s) - 5]), len(s) - 5)
self.assertEqual(len(s[5:-5]), len(s) - 10)
self.assertRaises(IndexError, operator.getitem, s, len(s))
self.assertRaises(IndexError, operator.getitem, s, len(s) + 1)
self.assertRaises(IndexError, operator.getitem, s, len(s) + 1<<31)
@bigmemtest(size=_2G, memuse=2)
def test_contains(self, size):
_ = self.from_latin1
SUBSTR = _('0123456789')
edge = _('-') * (size // 2)
s = _('').join([edge, SUBSTR, edge])
del edge
self.assertTrue(SUBSTR in s)
self.assertFalse(SUBSTR * 2 in s)
self.assertTrue(_('-') in s)
self.assertFalse(_('a') in s)
s += _('a')
self.assertTrue(_('a') in s)
@bigmemtest(size=_2G + 10, memuse=2)
def test_compare(self, size):
_ = self.from_latin1
s1 = _('-') * size
s2 = _('-') * size
self.assertTrue(s1 == s2)
del s2
s2 = s1 + _('a')
self.assertFalse(s1 == s2)
del s2
s2 = _('.') * size
self.assertFalse(s1 == s2)
@bigmemtest(size=_2G + 10, memuse=1)
def test_hash(self, size):
# Not sure if we can do any meaningful tests here... Even if we
# start relying on the exact algorithm used, the result will be
# different depending on the size of the C 'long int'. Even this
# test is dodgy (there's no *guarantee* that the two things should
# have a different hash, even if they, in the current
# implementation, almost always do.)
_ = self.from_latin1
s = _('\x00') * size
h1 = hash(s)
del s
s = _('\x00') * (size + 1)
self.assertNotEqual(h1, hash(s))
class StrTest(unittest.TestCase, BaseStrTest):
def from_latin1(self, s):
return s
def basic_encode_test(self, size, enc, c='.', expectedsize=None):
if expectedsize is None:
expectedsize = size
try:
s = c * size
self.assertEqual(len(s.encode(enc)), expectedsize)
finally:
s = None
def setUp(self):
# HACK: adjust memory use of tests inherited from BaseStrTest
# according to character size.
self._adjusted = {}
for name in dir(BaseStrTest):
if not name.startswith('test_'):
continue
meth = getattr(type(self), name)
try:
memuse = meth.memuse
except AttributeError:
continue
meth.memuse = ascii_char_size * memuse
self._adjusted[name] = memuse
def tearDown(self):
for name, memuse in self._adjusted.items():
getattr(type(self), name).memuse = memuse
@bigmemtest(size=_2G, memuse=ucs4_char_size * 3)
def test_capitalize(self, size):
self._test_capitalize(size)
@bigmemtest(size=_2G, memuse=ucs4_char_size * 3)
def test_title(self, size):
self._test_title(size)
@bigmemtest(size=_2G, memuse=ucs4_char_size * 3)
def test_swapcase(self, size):
self._test_swapcase(size)
# Many codecs convert to the legacy representation first, explaining
# why we add 'ucs4_char_size' to the 'memuse' below.
@bigmemtest(size=_2G + 2, memuse=ascii_char_size + 1)
def test_encode(self, size):
return self.basic_encode_test(size, 'utf-8')
@bigmemtest(size=_4G // 6 + 2, memuse=ascii_char_size + ucs4_char_size + 1)
def test_encode_raw_unicode_escape(self, size):
try:
return self.basic_encode_test(size, 'raw_unicode_escape')
except MemoryError:
pass # acceptable on 32-bit
@bigmemtest(size=_4G // 5 + 70, memuse=ascii_char_size + ucs4_char_size + 1)
def test_encode_utf7(self, size):
try:
return self.basic_encode_test(size, 'utf7')
except MemoryError:
pass # acceptable on 32-bit
@bigmemtest(size=_4G // 4 + 5, memuse=ascii_char_size + ucs4_char_size + 4)
def test_encode_utf32(self, size):
try:
return self.basic_encode_test(size, 'utf32', expectedsize=4 * size + 4)
except MemoryError:
pass # acceptable on 32-bit
@bigmemtest(size=_2G - 1, memuse=ascii_char_size + 1)
def test_encode_ascii(self, size):
return self.basic_encode_test(size, 'ascii', c='A')
# str % (...) uses a Py_UCS4 intermediate representation
@bigmemtest(size=_2G + 10, memuse=ascii_char_size * 2 + ucs4_char_size)
def test_format(self, size):
s = '-' * size
sf = '%s' % (s,)
self.assertTrue(s == sf)
del sf
sf = '..%s..' % (s,)
self.assertEqual(len(sf), len(s) + 4)
self.assertTrue(sf.startswith('..-'))
self.assertTrue(sf.endswith('-..'))
del s, sf
size //= 2
edge = '-' * size
s = ''.join([edge, '%s', edge])
del edge
s = s % '...'
self.assertEqual(len(s), size * 2 + 3)
self.assertEqual(s.count('.'), 3)
self.assertEqual(s.count('-'), size * 2)
@bigmemtest(size=_2G + 10, memuse=ascii_char_size * 2)
def test_repr_small(self, size):
s = '-' * size
s = repr(s)
self.assertEqual(len(s), size + 2)
self.assertEqual(s[0], "'")
self.assertEqual(s[-1], "'")
self.assertEqual(s.count('-'), size)
del s
# repr() will create a string four times as large as this 'binary
# string', but we don't want to allocate much more than twice
# size in total. (We do extra testing in test_repr_large())
size = size // 5 * 2
s = '\x00' * size
s = repr(s)
self.assertEqual(len(s), size * 4 + 2)
self.assertEqual(s[0], "'")
self.assertEqual(s[-1], "'")
self.assertEqual(s.count('\\'), size)
self.assertEqual(s.count('0'), size * 2)
@bigmemtest(size=_2G + 10, memuse=ascii_char_size * 5)
def test_repr_large(self, size):
s = '\x00' * size
s = repr(s)
self.assertEqual(len(s), size * 4 + 2)
self.assertEqual(s[0], "'")
self.assertEqual(s[-1], "'")
self.assertEqual(s.count('\\'), size)
self.assertEqual(s.count('0'), size * 2)
# ascii() calls encode('ascii', 'backslashreplace'), which itself
# creates a temporary Py_UNICODE representation in addition to the
# original (Py_UCS2) one
# There's also some overallocation when resizing the ascii() result
# that isn't taken into account here.
@bigmemtest(size=_2G // 5 + 1, memuse=ucs2_char_size +
ucs4_char_size + ascii_char_size * 6)
def test_unicode_repr(self, size):
# Use an assigned, but not printable code point.
# It is in the range of the low surrogates \uDC00-\uDFFF.
char = "\uDCBA"
s = char * size
try:
for f in (repr, ascii):
r = f(s)
self.assertEqual(len(r), 2 + (len(f(char)) - 2) * size)
self.assertTrue(r.endswith(r"\udcba'"), r[-10:])
r = None
finally:
r = s = None
@bigmemtest(size=_2G // 5 + 1, memuse=ucs4_char_size * 2 + ascii_char_size * 10)
def test_unicode_repr_wide(self, size):
char = "\U0001DCBA"
s = char * size
try:
for f in (repr, ascii):
r = f(s)
self.assertEqual(len(r), 2 + (len(f(char)) - 2) * size)
self.assertTrue(r.endswith(r"\U0001dcba'"), r[-12:])
r = None
finally:
r = s = None
# The original test_translate is overridden here, so as to get the
# correct size estimate: str.translate() uses an intermediate Py_UCS4
# representation.
@bigmemtest(size=_2G, memuse=ascii_char_size * 2 + ucs4_char_size)
def test_translate(self, size):
_ = self.from_latin1
SUBSTR = _('aZz.z.Aaz.')
trans = {
ord(_('.')): _('-'),
ord(_('a')): _('!'),
ord(_('Z')): _('$'),
}
sublen = len(SUBSTR)
repeats = size // sublen + 2
s = SUBSTR * repeats
s = s.translate(trans)
self.assertEqual(len(s), repeats * sublen)
self.assertEqual(s[:sublen], SUBSTR.translate(trans))
self.assertEqual(s[-sublen:], SUBSTR.translate(trans))
self.assertEqual(s.count(_('.')), 0)
self.assertEqual(s.count(_('!')), repeats * 2)
self.assertEqual(s.count(_('z')), repeats * 3)
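# Usage sketch for the translation table exercised above (illustrative):
# str.translate() takes a mapping from code points to replacement strings.
#   >>> 'aZz.z.Aaz.'.translate({ord('.'): '-', ord('a'): '!', ord('Z'): '$'})
#   '!$z-z-A!z-'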
class BytesTest(unittest.TestCase, BaseStrTest):
def from_latin1(self, s):
return s.encode("latin-1")
@bigmemtest(size=_2G + 2, memuse=1 + ascii_char_size)
def test_decode(self, size):
s = self.from_latin1('.') * size
self.assertEqual(len(s.decode('utf-8')), size)
@bigmemtest(size=_2G, memuse=2)
def test_capitalize(self, size):
self._test_capitalize(size)
@bigmemtest(size=_2G, memuse=2)
def test_title(self, size):
self._test_title(size)
@bigmemtest(size=_2G, memuse=2)
def test_swapcase(self, size):
self._test_swapcase(size)
class BytearrayTest(unittest.TestCase, BaseStrTest):
def from_latin1(self, s):
return bytearray(s.encode("latin-1"))
@bigmemtest(size=_2G + 2, memuse=1 + ascii_char_size)
def test_decode(self, size):
s = self.from_latin1('.') * size
self.assertEqual(len(s.decode('utf-8')), size)
@bigmemtest(size=_2G, memuse=2)
def test_capitalize(self, size):
self._test_capitalize(size)
@bigmemtest(size=_2G, memuse=2)
def test_title(self, size):
self._test_title(size)
@bigmemtest(size=_2G, memuse=2)
def test_swapcase(self, size):
self._test_swapcase(size)
# Disable inherited tests that are not run for bytearray (it is unhashable,
# and the large split test is skipped for this type).
test_hash = None
test_split_large = None
class TupleTest(unittest.TestCase):
# Tuples have a small, fixed-sized head and an array of pointers to
# data. Since we're testing 64-bit addressing, we can assume that the
# pointers are 8 bytes, and that thus that the tuples take up 8 bytes
# per size.
# As a side-effect of testing long tuples, these tests happen to test
# having more than 2<<31 references to any given object. Hence the
# use of different types of objects as contents in different tests.
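# Illustrative check of the 8-bytes-per-item estimate (CPython, 64-bit build):
#   >>> import sys
#   >>> sys.getsizeof(('',) * 1000) - sys.getsizeof(())
#   8000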
@bigmemtest(size=_2G + 2, memuse=16)
def test_compare(self, size):
t1 = ('',) * size
t2 = ('',) * size
self.assertTrue(t1 == t2)
del t2
t2 = ('',) * (size + 1)
self.assertFalse(t1 == t2)
del t2
t2 = (1,) * size
self.assertFalse(t1 == t2)
# Test concatenating into a single tuple of more than 2G in length,
# and concatenating a tuple of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_concat_test(self, size):
t = ((),) * size
self.assertEqual(len(t), size)
t = t + t
self.assertEqual(len(t), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_concat_test(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_concat_test(size)
@bigmemtest(size=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
t = (1, 2, 3, 4, 5) * size
self.assertEqual(len(t), size * 5)
self.assertTrue(5 in t)
self.assertFalse((1, 2, 3, 4, 5) in t)
self.assertFalse(0 in t)
@bigmemtest(size=_2G + 10, memuse=8)
def test_hash(self, size):
t1 = (0,) * size
h1 = hash(t1)
del t1
t2 = (0,) * (size + 1)
self.assertFalse(h1 == hash(t2))
@bigmemtest(size=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
t = (None,) * size
self.assertEqual(len(t), size)
self.assertEqual(t[-1], None)
self.assertEqual(t[5], None)
self.assertEqual(t[size - 1], None)
self.assertRaises(IndexError, operator.getitem, t, size)
self.assertEqual(t[:5], (None,) * 5)
self.assertEqual(t[-5:], (None,) * 5)
self.assertEqual(t[20:25], (None,) * 5)
self.assertEqual(t[-25:-20], (None,) * 5)
self.assertEqual(t[size - 5:], (None,) * 5)
self.assertEqual(t[size - 5:size], (None,) * 5)
self.assertEqual(t[size - 6:size - 2], (None,) * 4)
self.assertEqual(t[size:size], ())
self.assertEqual(t[size:size+5], ())
# Like test_concat, split in two.
def basic_test_repeat(self, size):
t = ('',) * size
self.assertEqual(len(t), size)
t = t * 2
self.assertEqual(len(t), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_1G - 1, memuse=12)
def test_repeat_large_2(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_1G - 1, memuse=9)
def test_from_2G_generator(self, size):
self.skipTest("test needs much more memory than advertised, see issue5438")
try:
t = tuple(range(size))
except MemoryError:
pass # acceptable on 32-bit
else:
count = 0
for item in t:
self.assertEqual(item, count)
count += 1
self.assertEqual(count, size)
@bigmemtest(size=_1G - 25, memuse=9)
def test_from_almost_2G_generator(self, size):
self.skipTest("test needs much more memory than advertised, see issue5438")
try:
t = tuple(range(size))
count = 0
for item in t:
self.assertEqual(item, count)
count += 1
self.assertEqual(count, size)
except MemoryError:
pass # acceptable, expected on 32-bit
# Like test_concat, split in two.
def basic_test_repr(self, size):
t = (0,) * size
s = repr(t)
# The repr of a tuple of 0's is exactly three times the tuple length.
self.assertEqual(len(s), size * 3)
self.assertEqual(s[:5], '(0, 0')
self.assertEqual(s[-5:], '0, 0)')
self.assertEqual(s.count('0'), size)
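# Worked example of the three-characters-per-element rule asserted above:
#   >>> repr((0,) * 3)
#   '(0, 0, 0)'
#   >>> len(repr((0,) * 3))  # 3 * 3
#   9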
@bigmemtest(size=_2G // 3 + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(size=_2G + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_large(self, size):
return self.basic_test_repr(size)
class ListTest(unittest.TestCase):
# Like tuples, lists have a small, fixed-sized head and an array of
# pointers to data, so 8 bytes per size. Also like tuples, we make the
# lists hold references to various objects to test their refcount
# limits.
@bigmemtest(size=_2G + 2, memuse=16)
def test_compare(self, size):
l1 = [''] * size
l2 = [''] * size
self.assertTrue(l1 == l2)
del l2
l2 = [''] * (size + 1)
self.assertFalse(l1 == l2)
del l2
l2 = [2] * size
self.assertFalse(l1 == l2)
# Test concatenating into a single list of more than 2G in length,
# and concatenating a list of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_test_concat(self, size):
l = [[]] * size
self.assertEqual(len(l), size)
l = l + l
self.assertEqual(len(l), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_test_concat(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_test_concat(size)
def basic_test_inplace_concat(self, size):
l = [sys.stdout] * size
l += l
self.assertEqual(len(l), size * 2)
self.assertTrue(l[0] is l[-1])
self.assertTrue(l[size - 1] is l[size + 1])
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_inplace_concat_small(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_inplace_concat_large(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(size=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEqual(len(l), size * 5)
self.assertTrue(5 in l)
self.assertFalse([1, 2, 3, 4, 5] in l)
self.assertFalse(0 in l)
@bigmemtest(size=_2G + 10, memuse=8)
def test_hash(self, size):
l = [0] * size
self.assertRaises(TypeError, hash, l)
@bigmemtest(size=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
l = [None] * size
self.assertEqual(len(l), size)
self.assertEqual(l[-1], None)
self.assertEqual(l[5], None)
self.assertEqual(l[size - 1], None)
self.assertRaises(IndexError, operator.getitem, l, size)
self.assertEqual(l[:5], [None] * 5)
self.assertEqual(l[-5:], [None] * 5)
self.assertEqual(l[20:25], [None] * 5)
self.assertEqual(l[-25:-20], [None] * 5)
self.assertEqual(l[size - 5:], [None] * 5)
self.assertEqual(l[size - 5:size], [None] * 5)
self.assertEqual(l[size - 6:size - 2], [None] * 4)
self.assertEqual(l[size:size], [])
self.assertEqual(l[size:size+5], [])
l[size - 2] = 5
self.assertEqual(len(l), size)
self.assertEqual(l[-3:], [None, 5, None])
self.assertEqual(l.count(5), 1)
self.assertRaises(IndexError, operator.setitem, l, size, 6)
self.assertEqual(len(l), size)
l[size - 7:] = [1, 2, 3, 4, 5]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[-7:], [None, None, 1, 2, 3, 4, 5])
l[:7] = [1, 2, 3, 4, 5]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[:7], [1, 2, 3, 4, 5, None, None])
del l[size - 1]
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(l[-1], 4)
del l[-2:]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[-1], 2)
del l[0]
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(l[0], 2)
del l[:2]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[0], 4)
# Like test_concat, split in two.
def basic_test_repeat(self, size):
l = [] * size
self.assertFalse(l)
l = [''] * size
self.assertEqual(len(l), size)
l = l * 2
self.assertEqual(len(l), size * 2)
@bigmemtest(size=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(size=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
def basic_test_inplace_repeat(self, size):
l = ['']
l *= size
self.assertEqual(len(l), size)
self.assertTrue(l[0] is l[-1])
del l
l = [''] * size
l *= 2
self.assertEqual(len(l), size * 2)
self.assertTrue(l[size - 1] is l[-1])
@bigmemtest(size=_2G // 2 + 2, memuse=16)
def test_inplace_repeat_small(self, size):
return self.basic_test_inplace_repeat(size)
@bigmemtest(size=_2G + 2, memuse=16)
def test_inplace_repeat_large(self, size):
return self.basic_test_inplace_repeat(size)
def basic_test_repr(self, size):
l = [0] * size
s = repr(l)
# The repr of a list of 0's is exactly three times the list length.
self.assertEqual(len(s), size * 3)
self.assertEqual(s[:5], '[0, 0')
self.assertEqual(s[-5:], '0, 0]')
self.assertEqual(s.count('0'), size)
@bigmemtest(size=_2G // 3 + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(size=_2G + 2, memuse=8 + 3 * ascii_char_size)
def test_repr_large(self, size):
return self.basic_test_repr(size)
# list overallocates ~1/8th of the total size (on first expansion) so
# the single list.append call puts memuse at 9 bytes per size.
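# Back-of-the-envelope for the figure above: 8 bytes per pointer plus the
# roughly 1/8 growth reserve CPython adds when append() must resize:
#   8 * (1 + 1/8) = 9 bytes per element.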
@bigmemtest(size=_2G, memuse=9)
def test_append(self, size):
l = [object()] * size
l.append(object())
self.assertEqual(len(l), size+1)
self.assertTrue(l[-3] is l[-2])
self.assertFalse(l[-2] is l[-1])
@bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
def test_count(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEqual(l.count(1), size)
self.assertEqual(l.count("1"), 0)
def basic_test_extend(self, size):
l = [object] * size
l.extend(l)
self.assertEqual(len(l), size * 2)
self.assertTrue(l[0] is l[-1])
self.assertTrue(l[size - 1] is l[size + 1])
@bigmemtest(size=_2G // 2 + 2, memuse=16)
def test_extend_small(self, size):
return self.basic_test_extend(size)
@bigmemtest(size=_2G + 2, memuse=16)
def test_extend_large(self, size):
return self.basic_test_extend(size)
@bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
def test_index(self, size):
l = [1, 2, 3, 4, 5] * size
size *= 5
self.assertEqual(l.index(1), 0)
self.assertEqual(l.index(5, size - 5), size - 1)
self.assertEqual(l.index(5, size - 5, size), size - 1)
self.assertRaises(ValueError, l.index, 1, size - 4, size)
self.assertRaises(ValueError, l.index, 6)
# This test suffers from overallocation, just like test_append.
@bigmemtest(size=_2G + 10, memuse=9)
def test_insert(self, size):
l = [1.0] * size
l.insert(size - 1, "A")
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[-3:], [1.0, "A", 1.0])
l.insert(size + 1, "B")
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[-3:], ["A", 1.0, "B"])
l.insert(1, "C")
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[:3], [1.0, "C", 1.0])
self.assertEqual(l[size - 3:], ["A", 1.0, "B"])
@bigmemtest(size=_2G // 5 + 4, memuse=8 * 5)
def test_pop(self, size):
l = ["a", "b", "c", "d", "e"] * size
size *= 5
self.assertEqual(len(l), size)
item = l.pop()
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(item, "e")
self.assertEqual(l[-2:], ["c", "d"])
item = l.pop(0)
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(item, "a")
self.assertEqual(l[:2], ["b", "c"])
item = l.pop(size - 2)
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(item, "c")
self.assertEqual(l[-2:], ["b", "d"])
@bigmemtest(size=_2G + 10, memuse=8)
def test_remove(self, size):
l = [10] * size
self.assertEqual(len(l), size)
l.remove(10)
size -= 1
self.assertEqual(len(l), size)
# Because of the earlier l.remove(), this append doesn't trigger
# a resize.
l.append(5)
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[-2:], [10, 5])
l.remove(5)
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(l[-2:], [10, 10])
@bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
def test_reverse(self, size):
l = [1, 2, 3, 4, 5] * size
l.reverse()
self.assertEqual(len(l), size * 5)
self.assertEqual(l[-5:], [5, 4, 3, 2, 1])
self.assertEqual(l[:5], [5, 4, 3, 2, 1])
@bigmemtest(size=_2G // 5 + 2, memuse=8 * 5)
def test_sort(self, size):
l = [1, 2, 3, 4, 5] * size
l.sort()
self.assertEqual(len(l), size * 5)
self.assertEqual(l.count(1), size)
self.assertEqual(l[:10], [1] * 10)
self.assertEqual(l[-10:], [5] * 10)
def test_main():
support.run_unittest(StrTest, BytesTest, BytearrayTest,
TupleTest, ListTest)
if __name__ == '__main__':
if len(sys.argv) > 1:
support.set_memlimit(sys.argv[1])
test_main()
| lgpl-3.0 |
shurihell/testasia | lms/djangoapps/instructor/tests/utils.py | 121 | 2732 | """
Utilities for instructor unit tests
"""
import datetime
import json
import random
from django.utils.timezone import utc
from util.date_utils import get_default_time_display
class FakeInfo(object):
"""Parent class for faking objects used in tests"""
FEATURES = []
def __init__(self):
for feature in self.FEATURES:
setattr(self, feature, u'expected')
def to_dict(self):
""" Returns a dict representation of the object """
return {key: getattr(self, key) for key in self.FEATURES}
class FakeContentTask(FakeInfo):
""" Fake task info needed for email content list """
FEATURES = [
'task_input',
'task_output',
'requester',
]
def __init__(self, email_id, num_sent, num_failed, sent_to):
super(FakeContentTask, self).__init__()
self.task_input = {'email_id': email_id, 'to_option': sent_to}
self.task_input = json.dumps(self.task_input)
self.task_output = {'succeeded': num_sent, 'failed': num_failed}
self.task_output = json.dumps(self.task_output)
self.requester = 'expected'
def make_invalid_input(self):
"""Corrupt the task input field to test errors"""
self.task_input = "THIS IS INVALID JSON"
class FakeEmail(FakeInfo):
""" Corresponding fake email for a fake task """
FEATURES = [
'subject',
'html_message',
'id',
'created',
]
def __init__(self, email_id):
super(FakeEmail, self).__init__()
self.id = unicode(email_id) # pylint: disable=invalid-name
# Select a random date for the 'created' field
year = random.randint(1950, 2000)
month = random.randint(1, 12)
day = random.randint(1, 28)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
self.created = datetime.datetime(year, month, day, hour, minute, tzinfo=utc)
class FakeEmailInfo(FakeInfo):
""" Fake email information object """
FEATURES = [
u'created',
u'sent_to',
u'email',
u'number_sent',
u'requester',
]
EMAIL_FEATURES = [
u'subject',
u'html_message',
u'id'
]
def __init__(self, fake_email, num_sent, num_failed):
super(FakeEmailInfo, self).__init__()
self.created = get_default_time_display(fake_email.created)
number_sent = str(num_sent) + ' sent'
if num_failed > 0:
number_sent += ', ' + str(num_failed) + " failed"
self.number_sent = number_sent
fake_email_dict = fake_email.to_dict()
self.email = {feature: fake_email_dict[feature] for feature in self.EMAIL_FEATURES}
self.requester = u'expected'
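# Minimal usage sketch for these fakes (hypothetical values, illustration
# only, not part of the test suite):
#
#   task = FakeContentTask(email_id=1, num_sent=10, num_failed=2, sent_to='all')
#   email = FakeEmail(email_id=1)
#   info = FakeEmailInfo(email, num_sent=10, num_failed=2)
#   info.to_dict()  # {'created': ..., 'sent_to': u'expected', 'email': {...}, ...}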
| agpl-3.0 |
nmartensen/pandas | asv_bench/benchmarks/gil.py | 7 | 11003 | from .pandas_vb_common import *
from pandas.core.algorithms import take_1d
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
from pandas._libs import algos
except ImportError:
from pandas import algos
try:
from pandas.util.testing import test_parallel
have_real_test_parallel = True
except ImportError:
have_real_test_parallel = False
def test_parallel(num_threads=1):
def wrapper(fname):
return fname
return wrapper
class NoGilGroupby(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
np.random.seed(1234)
self.size = 2 ** 22
self.ngroups = 100
self.data = Series(np.random.randint(0, self.ngroups, size=self.size))
if (not have_real_test_parallel):
raise NotImplementedError
@test_parallel(num_threads=2)
def _pg2_count(self):
self.df.groupby('key')['data'].count()
def time_count_2(self):
self._pg2_count()
@test_parallel(num_threads=2)
def _pg2_last(self):
self.df.groupby('key')['data'].last()
def time_last_2(self):
self._pg2_last()
@test_parallel(num_threads=2)
def _pg2_max(self):
self.df.groupby('key')['data'].max()
def time_max_2(self):
self._pg2_max()
@test_parallel(num_threads=2)
def _pg2_mean(self):
self.df.groupby('key')['data'].mean()
def time_mean_2(self):
self._pg2_mean()
@test_parallel(num_threads=2)
def _pg2_min(self):
self.df.groupby('key')['data'].min()
def time_min_2(self):
self._pg2_min()
@test_parallel(num_threads=2)
def _pg2_prod(self):
self.df.groupby('key')['data'].prod()
def time_prod_2(self):
self._pg2_prod()
@test_parallel(num_threads=2)
def _pg2_sum(self):
self.df.groupby('key')['data'].sum()
def time_sum_2(self):
self._pg2_sum()
@test_parallel(num_threads=4)
def _pg4_sum(self):
self.df.groupby('key')['data'].sum()
def time_sum_4(self):
self._pg4_sum()
def time_sum_4_notp(self):
for i in range(4):
self.df.groupby('key')['data'].sum()
def _f_sum(self):
self.df.groupby('key')['data'].sum()
@test_parallel(num_threads=8)
def _pg8_sum(self):
self._f_sum()
def time_sum_8(self):
self._pg8_sum()
def time_sum_8_notp(self):
for i in range(8):
self._f_sum()
@test_parallel(num_threads=2)
def _pg2_var(self):
self.df.groupby('key')['data'].var()
def time_var_2(self):
self._pg2_var()
# get groups
def _groups(self):
self.data.groupby(self.data).groups
@test_parallel(num_threads=2)
def _pg2_groups(self):
self._groups()
def time_groups_2(self):
self._pg2_groups()
@test_parallel(num_threads=4)
def _pg4_groups(self):
self._groups()
def time_groups_4(self):
self._pg4_groups()
@test_parallel(num_threads=8)
def _pg8_groups(self):
self._groups()
def time_groups_8(self):
self._pg8_groups()
class nogil_take1d_float64(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
if (not have_real_test_parallel):
raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
def time_nogil_take1d_float64(self):
self.take_1d_pg2_float64()
@test_parallel(num_threads=2)
def take_1d_pg2_int64(self):
take_1d(self.df.int64.values, self.indexer)
@test_parallel(num_threads=2)
def take_1d_pg2_float64(self):
take_1d(self.df.float64.values, self.indexer)
class nogil_take1d_int64(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
self.ngroups = 1000
np.random.seed(1234)
self.df = DataFrame({'key': np.random.randint(0, self.ngroups, size=self.N), 'data': np.random.randn(self.N), })
if (not have_real_test_parallel):
raise NotImplementedError
self.N = 10000000.0
self.df = DataFrame({'int64': np.arange(self.N, dtype='int64'), 'float64': np.arange(self.N, dtype='float64'), })
self.indexer = np.arange(100, (len(self.df) - 100))
def time_nogil_take1d_int64(self):
self.take_1d_pg2_int64()
@test_parallel(num_threads=2)
def take_1d_pg2_int64(self):
take_1d(self.df.int64.values, self.indexer)
@test_parallel(num_threads=2)
def take_1d_pg2_float64(self):
take_1d(self.df.float64.values, self.indexer)
class nogil_kth_smallest(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
np.random.seed(1234)
self.N = 10000000
self.k = 500000
self.a = np.random.randn(self.N)
self.b = self.a.copy()
self.kwargs_list = [{'arr': self.a}, {'arr': self.b}]
def time_nogil_kth_smallest(self):
@test_parallel(num_threads=2, kwargs_list=self.kwargs_list)
def run(arr):
algos.kth_smallest(arr, self.k)
run()
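# Note on the pattern above (based on how test_parallel is called here):
# kwargs_list hands each spawned thread its own kwargs, so the two threads
# run kth_smallest concurrently on distinct arrays ({'arr': a} and
# {'arr': b}) instead of sharing one buffer.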
class nogil_datetime_fields(object):
goal_time = 0.2
def setup(self):
self.N = 100000000
self.dti = pd.date_range('1900-01-01', periods=self.N, freq='T')
self.period = self.dti.to_period('D')
if (not have_real_test_parallel):
raise NotImplementedError
def time_datetime_field_year(self):
@test_parallel(num_threads=2)
def run(dti):
dti.year
run(self.dti)
def time_datetime_field_day(self):
@test_parallel(num_threads=2)
def run(dti):
dti.day
run(self.dti)
def time_datetime_field_daysinmonth(self):
@test_parallel(num_threads=2)
def run(dti):
dti.days_in_month
run(self.dti)
def time_datetime_field_normalize(self):
@test_parallel(num_threads=2)
def run(dti):
dti.normalize()
run(self.dti)
def time_datetime_to_period(self):
@test_parallel(num_threads=2)
def run(dti):
dti.to_period('S')
run(self.dti)
def time_period_to_datetime(self):
@test_parallel(num_threads=2)
def run(period):
period.to_timestamp()
run(self.period)
class nogil_rolling_algos_slow(object):
goal_time = 0.2
def setup(self):
self.win = 100
np.random.seed(1234)
self.arr = np.random.rand(100000)
if (not have_real_test_parallel):
raise NotImplementedError
def time_nogil_rolling_median(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_median(arr, win)
run(self.arr, self.win)
class nogil_rolling_algos_fast(object):
goal_time = 0.2
def setup(self):
self.win = 100
np.random.seed(1234)
self.arr = np.random.rand(1000000)
if (not have_real_test_parallel):
raise NotImplementedError
def time_nogil_rolling_mean(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_mean(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_min(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_min(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_max(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_max(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_var(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_var(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_skew(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_skew(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_kurt(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_kurt(arr, win)
run(self.arr, self.win)
def time_nogil_rolling_std(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_std(arr, win)
run(self.arr, self.win)
class nogil_read_csv(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
# Write sample CSV files (plain numeric, datetime-indexed and object data)
# for the read benchmarks below.
self.df = DataFrame(np.random.randn(10000, 50))
self.df.to_csv('__test__.csv')
self.rng = date_range('1/1/2000', periods=10000)
self.df_date_time = DataFrame(np.random.randn(10000, 50), index=self.rng)
self.df_date_time.to_csv('__test_datetime__.csv')
self.df_object = DataFrame('foo', index=self.df.index, columns=self.create_cols('object'))
self.df_object.to_csv('__test_object__.csv')
def create_cols(self, name):
return [('%s%03d' % (name, i)) for i in range(5)]
@test_parallel(num_threads=2)
def pg_read_csv(self):
read_csv('__test__.csv', sep=',', header=None, float_precision=None)
def time_read_csv(self):
self.pg_read_csv()
@test_parallel(num_threads=2)
def pg_read_csv_object(self):
read_csv('__test_object__.csv', sep=',')
def time_read_csv_object(self):
self.pg_read_csv_object()
@test_parallel(num_threads=2)
def pg_read_csv_datetime(self):
read_csv('__test_datetime__.csv', sep=',', header=None)
def time_read_csv_datetime(self):
self.pg_read_csv_datetime()
class nogil_factorize(object):
number = 1
repeat = 5
def setup(self):
if (not have_real_test_parallel):
raise NotImplementedError
np.random.seed(1234)
self.strings = tm.makeStringIndex(100000)
def factorize_strings(self):
pd.factorize(self.strings)
@test_parallel(num_threads=4)
def _pg_factorize_strings_4(self):
self.factorize_strings()
def time_factorize_strings_4(self):
for i in range(2):
self._pg_factorize_strings_4()
@test_parallel(num_threads=2)
def _pg_factorize_strings_2(self):
self.factorize_strings()
def time_factorize_strings_2(self):
for i in range(4):
self._pg_factorize_strings_2()
def time_factorize_strings(self):
for i in range(8):
self.factorize_strings()
| bsd-3-clause |
SophieIPP/openfisca-france | openfisca_france/model/prelevements_obligatoires/prelevements_sociaux/cotisations_sociales/travail_fonction_publique.py | 1 | 13483 | # -*- coding: utf-8 -*-
from __future__ import division
import math
from numpy import minimum as min_
from ....base import * # noqa analysis:ignore
from .base import apply_bareme_for_relevant_type_sal
class allocations_temporaires_invalidite(Variable):
column = FloatCol
entity_class = Individus
label = u"Allocations temporaires d'invalidité (ATI, fonction publique et collectivités locales)"
# patronale, non-contributive
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales_public = simulation.calculate('assiette_cotisations_sociales_public', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
base = assiette_cotisations_sociales_public
cotisation_etat = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_employeur,
bareme_name = "ati",
base = base,
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
cotisation_collectivites_locales = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_employeur,
bareme_name = "atiacl",
base = base,
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, cotisation_etat + cotisation_collectivites_locales
class assiette_cotisations_sociales_public(Variable):
column = FloatCol
entity_class = Individus
label = u"Assiette des cotisations sociales des agents titulaires de la fonction publique"
# TODO: handle overtime hours (heures supplémentaires)
def function(self, simulation, period):
remuneration_principale = simulation.calculate('remuneration_principale', period)
# primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
# indemnite_residence = simulation.calculate('indemnite_residence', period)
type_sal = simulation.calculate('type_sal', period)
public = (type_sal >= 2)
# titulaire = (type_sal >= 2) * (type_sal <= 5)
assiette = public * (
remuneration_principale
# + not_(titulaire) * (indemnite_residence + primes_fonction_publique)
)
return period, assiette
# The SFT enters the base for the CSG, the RAFP, the exceptional solidarity contribution and the payroll tax
# Premiums, including the residence allowance: same treatment as the SFT
# Benefits in kind: enter the base for the exceptional solidarity contribution, the RAFP, the CSG and the CRDS.
class contribution_exceptionnelle_solidarite(Variable):
column = FloatCol
entity_class = Individus
label = u"Cotisation exceptionnelle au fonds de solidarité (salarié)"
def function(self, simulation, period):
period = period.this_month
traitement_indiciaire_brut = simulation.calculate('traitement_indiciaire_brut', period)
hsup = simulation.calculate('hsup', period)
type_sal = simulation.calculate('type_sal', period)
indemnite_residence = simulation.calculate('indemnite_residence', period)
primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
rafp_salarie = simulation.calculate('rafp_salarie', period)
pension_civile_salarie = simulation.calculate('pension_civile_salarie', period)
cotisations_salariales_contributives = simulation.calculate('cotisations_salariales_contributives', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
salaire_de_base = simulation.calculate('salaire_de_base', period)
_P = simulation.legislation_at(period.start)
seuil_assuj_fds = seuil_fds(_P)
assujettis = (
(type_sal == CAT['public_titulaire_etat']) +
(type_sal == CAT['public_titulaire_territoriale']) +
(type_sal == CAT['public_titulaire_hospitaliere']) +
(type_sal == CAT['public_non_titulaire'])
) * (
(traitement_indiciaire_brut + salaire_de_base - hsup) > seuil_assuj_fds
)
# TODO: check the contribution base (assiette), see IPP
cotisation = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_salarie,
bareme_name = "excep_solidarite",
base = assujettis * min_(
(
traitement_indiciaire_brut + salaire_de_base - hsup + indemnite_residence + rafp_salarie +
pension_civile_salarie +
primes_fonction_publique +
(type_sal == CAT['public_non_titulaire']) * cotisations_salariales_contributives
),
_P.cotsoc.sal.fonc.commun.plafond_base_solidarite,
),
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, cotisation
class fonds_emploi_hospitalier(Variable):
column = FloatCol
entity_class = Individus
label = u"Fonds pour l'emploi hospitalier (employeur)"
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales_public = simulation.calculate('assiette_cotisations_sociales_public', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
cotisation = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_employeur,
bareme_name = "feh",
base = assiette_cotisations_sociales_public, # TODO: check base
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, cotisation
class ircantec_salarie(Variable):
column = FloatCol
entity_class = Individus
label = u"Ircantec salarié"
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales = simulation.calculate('assiette_cotisations_sociales', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
ircantec = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_salarie,
bareme_name = "ircantec",
base = assiette_cotisations_sociales,
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, ircantec
class ircantec_employeur(Variable):
column = FloatCol
entity_class = Individus
label = u"Ircantec employeur"
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales = simulation.calculate('assiette_cotisations_sociales', period)
plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
ircantec = apply_bareme_for_relevant_type_sal(
bareme_by_type_sal_name = _P.cotsoc.cotisations_employeur,
bareme_name = "ircantec",
base = assiette_cotisations_sociales,
plafond_securite_sociale = plafond_securite_sociale,
type_sal = type_sal,
)
return period, ircantec
class pension_civile_salarie(Variable):
column = FloatCol
entity_class = Individus
label = u"Pension civile salarié"
url = u"http://www.ac-besancon.fr/spip.php?article2662",
def function(self, simulation, period):
period = period.this_month
traitement_indiciaire_brut = simulation.calculate('traitement_indiciaire_brut', period) # TODO: check nbi
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
sal = _P.cotsoc.cotisations_salarie
terr_or_hosp = (
(type_sal == CAT['public_titulaire_territoriale']) | (type_sal == CAT['public_titulaire_hospitaliere'])
)
pension_civile_salarie = (
(type_sal == CAT['public_titulaire_etat']) *
sal['public_titulaire_etat']['pension'].calc(traitement_indiciaire_brut) +
terr_or_hosp * sal['public_titulaire_territoriale']['cnracl1'].calc(traitement_indiciaire_brut)
)
return period, -pension_civile_salarie
class pension_civile_employeur(Variable):
column = FloatCol
entity_class = Individus
label = u"Cotisation patronale pension civile"
url = u"http://www.ac-besancon.fr/spip.php?article2662"
def function(self, simulation, period):
period = period.this_month
assiette_cotisations_sociales_public = simulation.calculate('assiette_cotisations_sociales_public', period)
# plafond_securite_sociale = simulation.calculate('plafond_securite_sociale', period)
type_sal = simulation.calculate('type_sal', period)
_P = simulation.legislation_at(period.start)
pat = _P.cotsoc.cotisations_employeur
terr_or_hosp = (
(type_sal == CAT['public_titulaire_territoriale']) | (type_sal == CAT['public_titulaire_hospitaliere'])
)
cot_pat_pension_civile = (
(type_sal == CAT['public_titulaire_etat']) * pat['public_titulaire_etat']['pension'].calc(
assiette_cotisations_sociales_public) +
terr_or_hosp * pat['public_titulaire_territoriale']['cnracl'].calc(assiette_cotisations_sociales_public)
)
return period, -cot_pat_pension_civile
class rafp_salarie(DatedVariable):
column = FloatCol
entity_class = Individus
label = u"Part salariale de la retraite additionelle de la fonction publique"
# Part salariale de la retraite additionelle de la fonction publique
# TODO: add the GIPA, which is not affected by the base ceiling
@dated_function(start = date(2005, 1, 1))
def function(self, simulation, period):
period = period.this_month
traitement_indiciaire_brut = simulation.calculate('traitement_indiciaire_brut', period)
type_sal = simulation.calculate('type_sal', period)
primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
supp_familial_traitement = simulation.calculate('supp_familial_traitement', period)
indemnite_residence = simulation.calculate('indemnite_residence', period)
_P = simulation.legislation_at(period.start)
eligible = ((type_sal == CAT['public_titulaire_etat'])
+ (type_sal == CAT['public_titulaire_territoriale'])
+ (type_sal == CAT['public_titulaire_hospitaliere']))
plaf_ass = _P.cotsoc.sal.fonc.etat.rafp_plaf_assiette
base_imposable = primes_fonction_publique + supp_familial_traitement + indemnite_residence
assiette = min_(base_imposable, plaf_ass * traitement_indiciaire_brut * eligible)
# Same scheme for the state and local government civil services
rafp_salarie = eligible * _P.cotsoc.cotisations_salarie.public_titulaire_etat['rafp'].calc(assiette)
return period, -rafp_salarie
class rafp_employeur(DatedVariable):
column = FloatCol
entity_class = Individus
label = u"Part patronale de la retraite additionnelle de la fonction publique"
# TODO: add the GIPA, which is not affected by the base ceiling
@dated_function(start = date(2005, 1, 1))
def function(self, simulation, period):
period = period.this_month
traitement_indiciaire_brut = simulation.calculate('traitement_indiciaire_brut', period)
type_sal = simulation.calculate('type_sal', period)
primes_fonction_publique = simulation.calculate('primes_fonction_publique', period)
supp_familial_traitement = simulation.calculate('supp_familial_traitement', period)
indemnite_residence = simulation.calculate('indemnite_residence', period)
_P = simulation.legislation_at(period.start)
eligible = (
(type_sal == CAT['public_titulaire_etat']) +
(type_sal == CAT['public_titulaire_territoriale']) +
(type_sal == CAT['public_titulaire_hospitaliere'])
)
plaf_ass = _P.cotsoc.sal.fonc.etat.rafp_plaf_assiette
base_imposable = primes_fonction_publique + supp_familial_traitement + indemnite_residence
assiette = min_(base_imposable, plaf_ass * traitement_indiciaire_brut * eligible)
bareme_rafp = _P.cotsoc.cotisations_employeur.public_titulaire_etat['rafp']
rafp_employeur = eligible * bareme_rafp.calc(assiette)
return period, - rafp_employeur
def seuil_fds(law):
'''
Compute the monthly liability threshold for the contribution to the solidarity fund (fonds de solidarité)
'''
ind_maj_ref = law.cotsoc.sal.fonc.commun.ind_maj_ref
pt_ind_mensuel = law.cotsoc.sal.fonc.commun.pt_ind / 12
seuil_mensuel = math.floor((pt_ind_mensuel * ind_maj_ref))
return seuil_mensuel
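# Worked example (hypothetical parameter values, for illustration only):
# with an annual point value pt_ind = 55.5635 and ind_maj_ref = 313,
# seuil_fds gives floor(55.5635 / 12 * 313) = floor(1449.28...) = 1449
# per month.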
| agpl-3.0 |
kingvuplus/gui_test5 | lib/python/Plugins/SystemPlugins/DiseqcTester/plugin.py | 63 | 27159 | import random
from Screens.Satconfig import NimSelection
from Screens.Screen import Screen
from Screens.TextBox import TextBox
from Screens.MessageBox import MessageBox
from Plugins.Plugin import PluginDescriptor
from Components.ActionMap import ActionMap, NumberActionMap
from Components.NimManager import nimmanager
from Components.ResourceManager import resourcemanager
from Components.TuneTest import TuneTest
from Components.Sources.List import List
from Components.Sources.Progress import Progress
from Components.Sources.StaticText import StaticText
from Components.ConfigList import ConfigListScreen
from Components.config import getConfigListEntry, ConfigSelection, ConfigYesNo
from Components.Harddisk import harddiskmanager
# always use:
# setResultType(type)
# setResultParameter(parameter)
# getTextualResult()
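# Minimal usage sketch of the contract above (illustrative):
#
#   parser.setResultType(ResultParser.TYPE_BYORBPOS)
#   parser.setResultParameter(192)  # orbital position, e.g. 192 = 19.2E
#   text = parser.getTextualResult()
#
# TYPE_ALL needs no parameter; TYPE_BYINDEX takes a (band, polarisation,
# orbital position) tuple as produced by getIndexForTransponder().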
class ResultParser:
def __init__(self):
pass
TYPE_BYORBPOS = 0
TYPE_BYINDEX = 1
TYPE_ALL = 2
def setResultType(self, type):
self.type = type
def setResultParameter(self, parameter):
if self.type == self.TYPE_BYORBPOS:
self.orbpos = parameter
elif self.type == self.TYPE_BYINDEX:
self.index = parameter
def getTextualResultForIndex(self, index, logfulltransponders = False):
text = ""
text += "%s:\n" % self.getTextualIndexRepresentation(index)
failed, successful = self.results[index]["failed"], self.results[index]["successful"]
countfailed = len(failed)
countsuccessful = len(successful)
countall = countfailed + countsuccessful
percentfailed = round(countfailed / float(countall + 0.0001) * 100)
percentsuccessful = round(countsuccessful / float(countall + 0.0001) * 100)
text += "Tested %d transponders\n%d (%d %%) transponders succeeded\n%d (%d %%) transponders failed\n" % (countall, countsuccessful, percentsuccessful, countfailed, percentfailed)
reasons = {}
completelist = []
if countfailed > 0:
for transponder in failed:
completelist.append({"transponder": transponder[0], "fedata": transponder[-1]})
reasons[transponder[2]] = reasons.get(transponder[2], [])
reasons[transponder[2]].append(transponder)
if transponder[2] == "pids_failed":
print transponder[2], "-", transponder[3]
text += "The %d unsuccessful tuning attempts failed for the following reasons:\n" % countfailed
for reason in reasons.keys():
text += "%s: %d transponders failed\n" % (reason, len(reasons[reason]))
for reason in reasons.keys():
text += "\n"
text += "%s previous planes:\n" % reason
for transponder in reasons[reason]:
if transponder[1] is not None:
text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1]))
else:
text += "No transponder tuned"
text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0]))
text += "\n"
if logfulltransponders:
text += str(transponder[1])
text += " ==> "
text += str(transponder[0])
text += "\n"
if reason == "pids_failed":
text += "(tsid, onid): "
text += str(transponder[3]['real'])
text += "(read from sat) != "
text += str(transponder[3]['expected'])
text += "(read from file)"
text += "\n"
text += "\n"
if countsuccessful > 0:
text += "\n"
text += "Successfully tuned transponders' previous planes:\n"
for transponder in successful:
completelist.append({"transponder": transponder[0], "fedata": transponder[-1]})
if transponder[1] is not None:
text += self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[1]))
else:
text += "No transponder tuned"
text += " ==> " + self.getTextualIndexRepresentation(self.getIndexForTransponder(transponder[0]))
text += "\n"
text += "------------------------------------------------\n"
text += "complete transponderlist:\n"
for entry in completelist:
text += str(entry["transponder"]) + " -- " + str(entry["fedata"]) + "\n"
return text
def getTextualResult(self):
text = ""
if self.type == self.TYPE_BYINDEX:
text += self.getTextualResultForIndex(self.index)
elif self.type == self.TYPE_BYORBPOS:
for index in self.results.keys():
if index[2] == self.orbpos:
text += self.getTextualResultForIndex(index)
text += "\n-----------------------------------------------------\n"
elif self.type == self.TYPE_ALL:
orderedResults = {}
for index in self.results.keys():
orbpos = index[2]
orderedResults[orbpos] = orderedResults.get(orbpos, [])
orderedResults[orbpos].append(index)
ordered_orbpos = orderedResults.keys()
ordered_orbpos.sort()
for orbpos in ordered_orbpos:
text += "\n*****************************************\n"
text += "Orbital position %s:" % str(orbpos)
text += "\n*****************************************\n"
for index in orderedResults[orbpos]:
text += self.getTextualResultForIndex(index, logfulltransponders = True)
text += "\n-----------------------------------------------------\n"
return text
class DiseqcTester(Screen, TuneTest, ResultParser):
skin = """
<screen position="90,100" size="520,400" title="DiSEqC Tester" >
<!--ePixmap pixmap="icons/dish_scan.png" position="5,25" zPosition="0" size="119,110" transparent="1" alphatest="on" />
<widget source="Frontend" render="Label" position="190,10" zPosition="2" size="260,20" font="Regular;19" halign="center" valign="center" transparent="1">
<convert type="FrontendInfo">SNRdB</convert>
</widget>
<eLabel name="snr" text="SNR:" position="120,35" size="60,22" font="Regular;21" halign="right" transparent="1" />
<widget source="Frontend" render="Progress" position="190,35" size="260,20" pixmap="bar_snr.png" borderWidth="2" borderColor="#cccccc">
<convert type="FrontendInfo">SNR</convert>
</widget>
<widget source="Frontend" render="Label" position="460,35" size="60,22" font="Regular;21">
<convert type="FrontendInfo">SNR</convert>
</widget>
<eLabel name="agc" text="AGC:" position="120,60" size="60,22" font="Regular;21" halign="right" transparent="1" />
<widget source="Frontend" render="Progress" position="190,60" size="260,20" pixmap="bar_snr.png" borderWidth="2" borderColor="#cccccc">
<convert type="FrontendInfo">AGC</convert>
</widget>
<widget source="Frontend" render="Label" position="460,60" size="60,22" font="Regular;21">
<convert type="FrontendInfo">AGC</convert>
</widget>
<eLabel name="ber" text="BER:" position="120,85" size="60,22" font="Regular;21" halign="right" transparent="1" />
<widget source="Frontend" render="Progress" position="190,85" size="260,20" pixmap="bar_ber.png" borderWidth="2" borderColor="#cccccc">
<convert type="FrontendInfo">BER</convert>
</widget>
<widget source="Frontend" render="Label" position="460,85" size="60,22" font="Regular;21">
<convert type="FrontendInfo">BER</convert>
</widget>
<eLabel name="lock" text="Lock:" position="120,115" size="60,22" font="Regular;21" halign="right" />
<widget source="Frontend" render="Pixmap" pixmap="icons/lock_on.png" position="190,110" zPosition="1" size="38,31" alphatest="on">
<convert type="FrontendInfo">LOCK</convert>
<convert type="ConditionalShowHide" />
</widget>
<widget source="Frontend" render="Pixmap" pixmap="icons/lock_off.png" position="190,110" zPosition="1" size="38,31" alphatest="on">
<convert type="FrontendInfo">LOCK</convert>
<convert type="ConditionalShowHide">Invert</convert>
</widget-->
<widget source="progress_list" render="Listbox" position="0,0" size="510,150" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (10, 0), size = (330, 25), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the index name,
MultiContentEntryText(pos = (330, 0), size = (150, 25), flags = RT_HALIGN_RIGHT, text = 2) # index 2 is the status,
],
"fonts": [gFont("Regular", 20)],
"itemHeight": 25
}
</convert>
</widget>
<eLabel name="overall_progress" text="Overall progress:" position="20,162" size="480,22" font="Regular;21" halign="center" transparent="1" />
<widget source="overall_progress" render="Progress" position="20,192" size="480,20" borderWidth="2" backgroundColor="#254f7497" />
<eLabel name="overall_progress" text="Progress:" position="20,222" size="480,22" font="Regular;21" halign="center" transparent="1" />
<widget source="sub_progress" render="Progress" position="20,252" size="480,20" borderWidth="2" backgroundColor="#254f7497" />
<eLabel name="" text="Failed:" position="20,282" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="failed_counter" render="Label" position="160,282" size="100,20" font="Regular;21" />
<eLabel name="" text="Succeeded:" position="20,312" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="succeeded_counter" render="Label" position="160,312" size="100,20" font="Regular;21" />
<eLabel name="" text="With errors:" position="20,342" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="witherrors_counter" render="Label" position="160,342" size="100,20" font="Regular;21" />
<eLabel name="" text="Not tested:" position="20,372" size="140,22" font="Regular;21" halign="left" transparent="1" />
<widget source="untestable_counter" render="Label" position="160,372" size="100,20" font="Regular;21" />
<widget source="CmdText" render="Label" position="300,282" size="180,200" font="Regular;21" />
</screen>"""
TEST_TYPE_QUICK = 0
TEST_TYPE_RANDOM = 1
TEST_TYPE_COMPLETE = 2
def __init__(self, session, feid, test_type = TEST_TYPE_QUICK, loopsfailed = 3, loopssuccessful = 1, log = False):
Screen.__init__(self, session)
self.feid = feid
self.test_type = test_type
self.loopsfailed = loopsfailed
self.loopssuccessful = loopssuccessful
self.log = log
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.select,
"cancel": self.keyCancel,
}, -2)
TuneTest.__init__(self, feid, stopOnSuccess = self.loopssuccessful, stopOnError = self.loopsfailed)
#self["Frontend"] = FrontendStatus(frontend_source = lambda : self.frontend, update_interval = 100)
self["overall_progress"] = Progress()
self["sub_progress"] = Progress()
self["failed_counter"] = StaticText("0")
self["succeeded_counter"] = StaticText("0")
self["witherrors_counter"] = StaticText("0")
self["untestable_counter"] = StaticText("0")
self.list = []
self["progress_list"] = List(self.list)
self["progress_list"].onSelectionChanged.append(self.selectionChanged)
self["CmdText"] = StaticText(_("Please wait while scanning is in progress..."))
self.indexlist = {}
self.readTransponderList()
self.running = False
self.results = {}
self.resultsstatus = {}
self.onLayoutFinish.append(self.go)
def getProgressListComponent(self, index, status):
return index, self.getTextualIndexRepresentation(index), status
def clearProgressList(self):
self.list = []
self["progress_list"].list = self.list
def addProgressListItem(self, index):
if index in self.indexlist:
for entry in self.list:
if entry[0] == index:
self.changeProgressListStatus(index, "working")
return
self.list.append(self.getProgressListComponent(index, _("working")))
self["progress_list"].list = self.list
self["progress_list"].setIndex(len(self.list) - 1)
def changeProgressListStatus(self, index, status):
self.newlist = []
count = 0
indexpos = 0
for entry in self.list:
if entry[0] == index:
self.newlist.append(self.getProgressListComponent(index, status))
indexpos = count
else:
self.newlist.append(entry)
count += 1
self.list = self.newlist
self["progress_list"].list = self.list
self["progress_list"].setIndex(indexpos)
def readTransponderList(self):
for sat in nimmanager.getSatListForNim(self.feid):
for transponder in nimmanager.getTransponders(sat[0]):
#print transponder
mytransponder = (transponder[1] / 1000, transponder[2] / 1000, transponder[3], transponder[4], transponder[7], sat[0], transponder[5], transponder[6], transponder[8], transponder[9], transponder[10], transponder[11])
self.analyseTransponder(mytransponder)
def getIndexForTransponder(self, transponder):
if transponder[0] < 11700:
band = 1 # low
else:
band = 0 # high
polarisation = transponder[2]
sat = transponder[5]
index = (band, polarisation, sat)
return index
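# Worked example (illustrative): a transponder at 11229 MHz, polarisation 0
# (horizontal) on orbital position 192 (19.2E) maps to the plane
# (1, 0, 192) -- low band, because 11229 < 11700.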
# sort the transponder into self.transponderlist
def analyseTransponder(self, transponder):
index = self.getIndexForTransponder(transponder)
if index not in self.indexlist:
self.indexlist[index] = []
self.indexlist[index].append(transponder)
#print "self.indexlist:", self.indexlist
# returns a string for the user representing a human readable output for index
def getTextualIndexRepresentation(self, index):
print "getTextualIndexRepresentation:", index
text = ""
text += nimmanager.getSatDescription(index[2]) + ", "
if index[0] == 1:
text += "Low Band, "
else:
text += "High Band, "
if index[1] == 0:
text += "H"
else:
text += "V"
return text
def fillTransponderList(self):
self.clearTransponder()
print "----------- fillTransponderList"
print "index:", self.currentlyTestedIndex
keys = self.indexlist.keys()
if self.getContinueScanning():
print "index:", self.getTextualIndexRepresentation(self.currentlyTestedIndex)
for transponder in self.indexlist[self.currentlyTestedIndex]:
self.addTransponder(transponder)
print "transponderList:", self.transponderlist
return True
else:
return False
def progressCallback(self, progress):
if progress[0] != self["sub_progress"].getRange():
self["sub_progress"].setRange(progress[0])
self["sub_progress"].setValue(progress[1])
# logic for scanning order of transponders
# on go getFirstIndex is called
def getFirstIndex(self):
# TODO use other function to scan more randomly
if self.test_type == self.TEST_TYPE_QUICK:
self.myindex = 0
keys = self.indexlist.keys()
keys.sort(key = lambda a: a[2]) # sort by orbpos
self["overall_progress"].setRange(len(keys))
self["overall_progress"].setValue(self.myindex)
return keys[0]
elif self.test_type == self.TEST_TYPE_RANDOM:
self.randomkeys = self.indexlist.keys()
random.shuffle(self.randomkeys)
self.myindex = 0
self["overall_progress"].setRange(len(self.randomkeys))
self["overall_progress"].setValue(self.myindex)
return self.randomkeys[0]
elif self.test_type == self.TEST_TYPE_COMPLETE:
keys = self.indexlist.keys()
print "keys:", keys
successorindex = {}
for index in keys:
successorindex[index] = []
for otherindex in keys:
if otherindex != index:
successorindex[index].append(otherindex)
random.shuffle(successorindex[index])
self.keylist = []
stop = False
currindex = None
while not stop:
if currindex is None or len(successorindex[currindex]) == 0:
oldindex = currindex
for index in successorindex.keys():
if len(successorindex[index]) > 0:
currindex = index
self.keylist.append(currindex)
break
if currindex == oldindex:
stop = True
else:
currindex = successorindex[currindex].pop()
self.keylist.append(currindex)
print "self.keylist:", self.keylist
self.myindex = 0
self["overall_progress"].setRange(len(self.keylist))
self["overall_progress"].setValue(self.myindex)
return self.keylist[0]
# after each index is finished, getNextIndex is called to get the next index to scan
def getNextIndex(self):
# TODO use other function to scan more randomly
if self.test_type == self.TEST_TYPE_QUICK:
self.myindex += 1
keys = self.indexlist.keys()
keys.sort(key = lambda a: a[2]) # sort by orbpos
self["overall_progress"].setValue(self.myindex)
if self.myindex < len(keys):
return keys[self.myindex]
else:
return None
elif self.test_type == self.TEST_TYPE_RANDOM:
self.myindex += 1
keys = self.randomkeys
self["overall_progress"].setValue(self.myindex)
if self.myindex < len(keys):
return keys[self.myindex]
else:
return None
elif self.test_type == self.TEST_TYPE_COMPLETE:
self.myindex += 1
keys = self.keylist
self["overall_progress"].setValue(self.myindex)
if self.myindex < len(keys):
return keys[self.myindex]
else:
return None
# after each index is finished and the next index is returned by getNextIndex
# the algorithm checks, if we should continue scanning
def getContinueScanning(self):
if self.test_type == self.TEST_TYPE_QUICK or self.test_type == self.TEST_TYPE_RANDOM:
return self.myindex < len(self.indexlist.keys())
elif self.test_type == self.TEST_TYPE_COMPLETE:
return self.myindex < len(self.keylist)
def addResult(self, index, status, failedTune, successfullyTune):
self.results[index] = self.results.get(index, {"failed": [], "successful": [], "status": None, "internalstatus": None})
self.resultsstatus[status] = self.resultsstatus.get(status, [])
oldstatus = self.results[index]["internalstatus"]
if oldstatus is None:
self.results[index]["status"] = status
elif oldstatus == "successful":
if status == "failed":
self.results[index]["status"] = "with_errors"
elif status == "successful":
self.results[index]["status"] = oldstatus
elif status == "with_errors":
self.results[index]["status"] = "with_errors"
elif status == "not_tested":
self.results[index]["status"] = oldstatus
elif oldstatus == "failed":
if status == "failed":
self.results[index]["status"] = oldstatus
elif status == "successful":
self.results[index]["status"] = "with_errors"
elif status == "with_errors":
self.results[index]["status"] = "with_errors"
elif status == "not_tested":
self.results[index]["status"] = oldstatus
elif oldstatus == "with_errors":
if status == "failed":
self.results[index]["status"] = oldstatus
elif status == "successful":
self.results[index]["status"] = oldstatus
elif status == "with_errors":
self.results[index]["status"] = oldstatus
elif status == "not_tested":
self.results[index]["status"] = oldstatus
elif oldstatus == "not_tested":
self.results[index]["status"] = status
if self.results[index]["status"] != "working":
self.results[index]["internalstatus"] = self.results[index]["status"]
self.results[index]["failed"] = failedTune + self.results[index]["failed"]
self.results[index]["successful"] = successfullyTune + self.results[index]["successful"]
self.resultsstatus[status].append(index)
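# Transition sketch for the status merge above (columns: incoming status):
#
#   old \ new     failed        successful    with_errors   not_tested
#   None          failed        successful    with_errors   not_tested
#   successful    with_errors   successful    with_errors   successful
#   failed        failed        with_errors   with_errors   failed
#   with_errors   with_errors   with_errors   with_errors   with_errors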
def finishedChecking(self):
print "finishedChecking"
TuneTest.finishedChecking(self)
if not self.results.has_key(self.currentlyTestedIndex):
self.results[self.currentlyTestedIndex] = {"failed": [], "successful": [], "status": None, "internalstatus": None}
if len(self.failedTune) > 0 and len(self.successfullyTune) > 0:
self.changeProgressListStatus(self.currentlyTestedIndex, "with errors")
self["witherrors_counter"].setText(str(int(self["witherrors_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, "with_errors", self.failedTune, self.successfullyTune)
elif len(self.failedTune) == 0 and len(self.successfullyTune) == 0:
self.changeProgressListStatus(self.currentlyTestedIndex, "not tested")
self["untestable_counter"].setText(str(int(self["untestable_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, "untestable", self.failedTune, self.successfullyTune)
elif len(self.failedTune) > 0:
self.changeProgressListStatus(self.currentlyTestedIndex, "failed")
#self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + len(self.failedTune)))
self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, "failed", self.failedTune, self.successfullyTune)
else:
self.changeProgressListStatus(self.currentlyTestedIndex, "successful")
#self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + len(self.successfullyTune)))
self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + 1))
self.addResult(self.currentlyTestedIndex, "successful", self.failedTune, self.successfullyTune)
#self["failed_counter"].setText(str(int(self["failed_counter"].getText()) + len(self.failedTune)))
#self["succeeded_counter"].setText(str(int(self["succeeded_counter"].getText()) + len(self.successfullyTune)))
#if len(self.failedTune) == 0 and len(self.successfullyTune) == 0:
#self["untestable_counter"].setText(str(int(self["untestable_counter"].getText()) + 1))
self.currentlyTestedIndex = self.getNextIndex()
self.addProgressListItem(self.currentlyTestedIndex)
if self.fillTransponderList():
self.run()
else:
self.running = False
self["progress_list"].setIndex(0)
print "results:", self.results
print "resultsstatus:", self.resultsstatus
if self.log:
file = open("/media/hdd/diseqctester.log", "w")
self.setResultType(ResultParser.TYPE_ALL)
file.write(self.getTextualResult())
file.close()
self.session.open(MessageBox, text=_("The results have been written to %s.") % "/media/hdd/diseqctester.log", type = MessageBox.TYPE_INFO)
def go(self):
self.running = True
self["failed_counter"].setText("0")
self["succeeded_counter"].setText("0")
self["untestable_counter"].setText("0")
self.currentlyTestedIndex = self.getFirstIndex()
self.clearProgressList()
self.addProgressListItem(self.currentlyTestedIndex)
if self.fillTransponderList():
self.run()
def keyCancel(self):
self.close()
def select(self):
print "selectedIndex:", self["progress_list"].getCurrent()[0]
if not self.running:
index = self["progress_list"].getCurrent()[0]
#self.setResultType(ResultParser.TYPE_BYORBPOS)
#self.setResultParameter(index[2])
self.setResultType(ResultParser.TYPE_BYINDEX)
self.setResultParameter(index)
#self.setResultType(ResultParser.TYPE_ALL)
self.session.open(TextBox, self.getTextualResult())
def selectionChanged(self):
print "selection changed"
if len(self.list) > 0 and not self.running:
self["CmdText"].setText(_("Press OK to get further details for %s") % str(self["progress_list"].getCurrent()[1]))
class DiseqcTesterTestTypeSelection(Screen, ConfigListScreen):
def __init__(self, session, feid):
Screen.__init__(self, session)
# for the skin: first try DiseqcTesterTestTypeSelection, then Setup; this allows individual skinning
self.skinName = ["DiseqcTesterTestTypeSelection", "Setup" ]
self.setup_title = _("DiSEqC-tester settings")
self.onChangedEntry = [ ]
self.feid = feid
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.keyOK,
"ok": self.keyOK,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def createSetup(self):
self.testtype = ConfigSelection(choices={"quick": _("Quick"), "random": _("Random"), "complete": _("Complete")}, default = "quick")
self.testtypeEntry = getConfigListEntry(_("Test type"), self.testtype)
self.list.append(self.testtypeEntry)
self.loopsfailed = ConfigSelection(choices={"-1": "Every known", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "3")
self.loopsfailedEntry = getConfigListEntry(_("Stop testing plane after # failed transponders"), self.loopsfailed)
self.list.append(self.loopsfailedEntry)
self.loopssuccessful = ConfigSelection(choices={"-1": "Every known", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6", "7": "7", "8": "8"}, default = "1")
self.loopssuccessfulEntry = getConfigListEntry(_("Stop testing plane after # successful transponders"), self.loopssuccessful)
self.list.append(self.loopssuccessfulEntry)
self.log = ConfigYesNo(False)
if harddiskmanager.HDDCount() > 0:
self.logEntry = getConfigListEntry(_("Log results to harddisk"), self.log)
self.list.append(self.logEntry)
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyOK(self):
print self.testtype.value
testtype = DiseqcTester.TEST_TYPE_QUICK
if self.testtype.value == "quick":
testtype = DiseqcTester.TEST_TYPE_QUICK
elif self.testtype.value == "random":
testtype = DiseqcTester.TEST_TYPE_RANDOM
elif self.testtype.value == "complete":
testtype = DiseqcTester.TEST_TYPE_COMPLETE
self.session.open(DiseqcTester, feid = self.feid, test_type = testtype, loopsfailed = int(self.loopsfailed.value), loopssuccessful = int(self.loopssuccessful.value), log = self.log.value)
def keyCancel(self):
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
class DiseqcTesterNimSelection(NimSelection):
skin = """
<screen position="160,123" size="400,330" title="Select a tuner">
<widget source="nimlist" render="Listbox" position="0,0" size="380,300" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (10, 5), size = (360, 30), flags = RT_HALIGN_LEFT, text = 1), # index 1 is the nim name,
MultiContentEntryText(pos = (50, 30), size = (320, 30), font = 1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is a description of the nim settings,
],
"fonts": [gFont("Regular", 20), gFont("Regular", 15)],
"itemHeight": 70
}
</convert>
</widget>
</screen>"""
def __init__(self, session, args = None):
NimSelection.__init__(self, session)
def setResultClass(self):
#self.resultclass = DiseqcTester
self.resultclass = DiseqcTesterTestTypeSelection
def showNim(self, nim):
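# Offer only DVB-S tuners that have their own configuration; tuners in
# loopthrough/equal/satposdepends/nothing modes cannot be driven
# independently, so they are filtered out.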
nimConfig = nimmanager.getNimConfig(nim.slot)
if nim.isCompatible("DVB-S"):
if nimConfig.configMode.value in ("loopthrough", "equal", "satposdepends", "nothing"):
return False
if nimConfig.configMode.value == "simple":
if nimConfig.diseqcMode.value == "positioner":
return True
return True
return False
def DiseqcTesterMain(session, **kwargs):
session.open(DiseqcTesterNimSelection)
def autostart(reason, **kwargs):
resourcemanager.addResource("DiseqcTester", DiseqcTesterMain)
def Plugins(**kwargs):
return [ PluginDescriptor(name="DiSEqC Tester", description=_("Test DiSEqC settings"), where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=DiseqcTesterMain),
PluginDescriptor(where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart)]
| gpl-2.0 |
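The addResult ladder in the plugin above reduces to a small merge function. A minimal standalone sketch of the same rule (the function name and the asserts are illustrative, not part of the plugin):

def merge_status(old, new):
    # None means "no result yet"; "with_errors" is absorbing and
    # "not_tested" never downgrades an existing status.
    if old is None or old == "not_tested":
        return new
    if new == "not_tested":
        return old
    return old if old == new else "with_errors"

assert merge_status(None, "successful") == "successful"
assert merge_status("successful", "failed") == "with_errors"
assert merge_status("failed", "not_tested") == "failed"
assert merge_status("with_errors", "successful") == "with_errors"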
Belxjander/Kirito | Python-3.5.0-main/Lib/multiprocessing/queues.py | 9 | 11166 | #
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
import sys
import os
import threading
import collections
import time
import weakref
import errno
from queue import Empty, Full
import _multiprocessing
from . import connection
from . import context
from .util import debug, info, Finalize, register_after_fork, is_exiting
from .reduction import ForkingPickler
#
# Queue type using a pipe, buffer and thread
#
class Queue(object):
def __init__(self, maxsize=0, *, ctx):
if maxsize <= 0:
# Can raise ImportError (see issues #3770 and #23400)
from .synchronize import SEM_VALUE_MAX as maxsize
self._maxsize = maxsize
self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = ctx.Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = ctx.Lock()
self._sem = ctx.BoundedSemaphore(maxsize)
# For use by concurrent.futures
self._ignore_epipe = False
self._after_fork()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
def __getstate__(self):
context.assert_spawning(self)
return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
(self._ignore_epipe, self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid) = state
self._after_fork()
def _after_fork(self):
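# Recreate everything that cannot survive a fork: the feeder thread, its
# buffer and condition, and the methods bound to the pipe endpoints.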
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send_bytes = self._writer.send_bytes
self._recv_bytes = self._reader.recv_bytes
self._poll = self._reader.poll
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
with self._notempty:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
def get(self, block=True, timeout=None):
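# Three cases: block forever (plain lock + recv), block with a timeout
# (the remaining time is split between the lock and the poll), and
# non-blocking (a single poll under the lock).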
if block and timeout is None:
with self._rlock:
res = self._recv_bytes()
self._sem.release()
else:
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if block:
timeout = deadline - time.time()
if timeout < 0 or not self._poll(timeout):
raise Empty
elif not self._poll():
raise Empty
res = self._recv_bytes()
self._sem.release()
finally:
self._rlock.release()
# deserialize the data after the lock has been released
return ForkingPickler.loads(res)
def qsize(self):
# Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
try:
self._reader.close()
finally:
close = self._close
if close:
self._close = None
close()
def join_thread(self):
debug('Queue.join_thread()')
assert self._closed
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue._feed,
args=(self._buffer, self._notempty, self._send_bytes,
self._wlock, self._writer.close, self._ignore_epipe),
name='QueueFeederThread'
)
self._thread.daemon = True
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
# On process exit we will wait for data to be flushed to pipe.
#
# However, if this process created the queue then all
# processes which use the queue will be descendants of this
# process. Therefore waiting for the queue to be flushed
# is pointless once all the child processes have been joined.
created_by_this_process = (self._opid == os.getpid())
if not self._joincancelled and not created_by_this_process:
self._jointhread = Finalize(
self._thread, Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5
)
# Send sentinel to the thread queue object when garbage collected
self._close = Finalize(
self, Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10
)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
with notempty:
buffer.append(_sentinel)
notempty.notify()
@staticmethod
def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe):
debug('starting thread to feed data to pipe')
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
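# The locals above cache bound methods so the feeder's hot loop avoids
# repeated attribute lookups.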
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
# serialize the data before acquiring the lock
obj = ForkingPickler.dumps(obj)
if wacquire is None:
send_bytes(obj)
else:
wacquire()
try:
send_bytes(obj)
finally:
wrelease()
except IndexError:
pass
except Exception as e:
if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
return
# Since this runs in a daemon thread the resources it uses
# may become unusable while the process is cleaning up.
# We ignore errors which happen after the process has
# started to cleanup.
try:
if is_exiting():
info('error in queue thread: %s', e)
else:
import traceback
traceback.print_exc()
except Exception:
pass
_sentinel = object()
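# Module-level sentinel, compared by identity in _feed(); a fresh object()
# can never be confused with a value a user put on the queue.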
#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#
class JoinableQueue(Queue):
def __init__(self, maxsize=0, *, ctx):
Queue.__init__(self, maxsize, ctx=ctx)
self._unfinished_tasks = ctx.Semaphore(0)
self._cond = ctx.Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, obj, block=True, timeout=None):
assert not self._closed
if not self._sem.acquire(block, timeout):
raise Full
with self._notempty, self._cond:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._unfinished_tasks.release()
self._notempty.notify()
def task_done(self):
with self._cond:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
def join(self):
with self._cond:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
#
# Simplified Queue type -- really just a locked pipe
#
class SimpleQueue(object):
def __init__(self, *, ctx):
self._reader, self._writer = connection.Pipe(duplex=False)
self._rlock = ctx.Lock()
self._poll = self._reader.poll
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = ctx.Lock()
def empty(self):
return not self._poll()
def __getstate__(self):
context.assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
def get(self):
with self._rlock:
res = self._reader.recv_bytes()
# deserialize the data after the lock has been released
return ForkingPickler.loads(res)
def put(self, obj):
# serialize the data before acquiring the lock
obj = ForkingPickler.dumps(obj)
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self._writer.send_bytes(obj)
else:
with self._wlock:
self._writer.send_bytes(obj)
| gpl-3.0 |
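A minimal usage sketch for the JoinableQueue defined above, using only the standard-library API; the worker function and the None sentinel are illustrative. Each get() is paired with exactly one task_done(), which is what keeps the unfinished-task semaphore balanced (see the module comment above):

import multiprocessing

def worker(q):
    while True:
        item = q.get()
        try:
            if item is None:  # sentinel: stop this worker
                return
            print("processed", item)
        finally:
            q.task_done()  # exactly one task_done() per get(), sentinel included

if __name__ == "__main__":
    q = multiprocessing.JoinableQueue()
    p = multiprocessing.Process(target=worker, args=(q,))
    p.start()
    for i in range(3):
        q.put(i)
    q.put(None)
    q.join()  # returns once every put() has been matched by a task_done()
    p.join()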